language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 2879,
"end": 5652
} | class ____:
def __init__(self, *children, **kwparams):
"""
Create a node
### Parameters
- **children**: One or more sub-conditions. These can be additional
`intersect`, `disjunct`, `union`, `optional`, or any other `Node`
type.
The semantics of multiple conditions are dependent on the type of
query. For an `intersection` node, this amounts to a logical AND,
for a `union` node, this amounts to a logical `OR`.
- **kwparams**: key-value parameters. Each key is the name of a field,
and the value should be a field value. This can be one of the
following:
- Simple string (for text field matches)
- value returned by one of the helper functions
- list of either a string or a value
### Examples
Field `num` should be between 1 and 10
```
intersect(num=between(1, 10)
```
Name can either be `bob` or `john`
```
union(name=("bob", "john"))
```
Don't select countries in Israel, Japan, or US
```
disjunct_union(country=("il", "jp", "us"))
```
"""
self.params = []
kvparams = {}
for k, v in kwparams.items():
curvals = kvparams.setdefault(k, [])
if isinstance(v, (str, int, float)):
curvals.append(Value.make_value(v))
elif isinstance(v, Value):
curvals.append(v)
else:
curvals.extend(Value.make_value(subv) for subv in v)
self.params += [Node.to_node(p) for p in children]
for k, v in kvparams.items():
self.params.extend(self.join_fields(k, v))
def join_fields(self, key, vals):
if len(vals) == 1:
return [BaseNode(f"@{key}:{vals[0].to_string()}")]
if not vals[0].combinable:
return [BaseNode(f"@{key}:{v.to_string()}") for v in vals]
s = BaseNode(f"@{key}:({self.JOINSTR.join(v.to_string() for v in vals)})")
return [s]
@classmethod
def to_node(cls, obj): # noqa
if isinstance(obj, Node):
return obj
return BaseNode(obj)
@property
def JOINSTR(self):
raise NotImplementedError()
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
pre, post = ("(", ")") if with_parens else ("", "")
return f"{pre}{self.JOINSTR.join(n.to_string() for n in self.params)}{post}"
def _should_use_paren(self, optval):
if optval is not None:
return optval
return len(self.params) > 1
def __str__(self):
return self.to_string()
| Node |
python | numba__numba | numba/cuda/cudadrv/devices.py | {
"start": 1705,
"end": 2578
} | class ____(object):
"""
Provides a context manager for executing in the context of the chosen
device. The normal use of instances of this type is from
``numba.cuda.gpus``. For example, to execute on device 2::
with numba.cuda.gpus[2]:
d_a = numba.cuda.to_device(a)
to copy the array *a* onto device 2, referred to by *d_a*.
"""
def __init__(self, device):
self._device = device
def __getattr__(self, item):
return getattr(self._device, item)
def __enter__(self):
_runtime.get_or_create_context(self._device.id)
def __exit__(self, exc_type, exc_val, exc_tb):
# this will verify that we are popping the right device context.
self._device.get_primary_context().pop()
def __str__(self):
return "<Managed Device {self.id}>".format(self=self)
| _DeviceContextManager |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 162193,
"end": 162943
} | class ____(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
| ParallelThreadIdNode |
python | ray-project__ray | python/ray/data/_internal/memory_tracing.py | {
"start": 2070,
"end": 5861
} | class ____:
def __init__(self):
self.allocated: Dict[ray.ObjectRef, dict] = {}
self.deallocated: Dict[ray.ObjectRef, dict] = {}
self.skip_dealloc: Dict[ray.ObjectRef, str] = {}
self.peak_mem = 0
self.cur_mem = 0
def trace_alloc(self, ref: List[ray.ObjectRef], loc: str):
ref = ref[0] # Avoid Ray materializing the ref.
if ref not in self.allocated:
meta = ray.experimental.get_object_locations([ref])
size_bytes = meta.get("object_size", 0)
if not size_bytes:
size_bytes = -1
from ray import cloudpickle as pickle
try:
obj = ray.get(ref, timeout=5.0)
size_bytes = len(pickle.dumps(obj))
except Exception:
print("[mem_tracing] ERROR getting size")
size_bytes = -1
print(f"[mem_tracing] Allocated {size_bytes} bytes at {loc}: {ref}")
entry = {
"size_bytes": size_bytes,
"loc": loc,
}
self.allocated[ref] = entry
self.cur_mem += size_bytes
self.peak_mem = max(self.cur_mem, self.peak_mem)
def trace_dealloc(self, ref: List[ray.ObjectRef], loc: str, freed: bool):
ref = ref[0] # Avoid Ray materializing the ref.
size_bytes = self.allocated.get(ref, {}).get("size_bytes", 0)
if freed:
print(f"[mem_tracing] Freed {size_bytes} bytes at {loc}: {ref}")
if ref in self.allocated:
self.cur_mem -= size_bytes
self.deallocated[ref] = self.allocated.pop(ref)
self.deallocated[ref]["dealloc_loc"] = loc
if ref in self.deallocated:
# This object reference is already deallocated.
pass
else:
print(f"[mem_tracing] WARNING: allocation of {ref} was not traced!")
else:
print(f"[mem_tracing] Skipped freeing {size_bytes} bytes at {loc}: {ref}")
self.skip_dealloc[ref] = loc
def leak_report(self) -> str:
output = StringIO()
output.write("[mem_tracing] ===== Leaked objects =====\n")
for ref in self.allocated:
size_bytes = self.allocated[ref].get("size_bytes")
loc = self.allocated[ref].get("loc")
if ref in self.skip_dealloc:
dealloc_loc = self.skip_dealloc[ref]
output.write(
f"[mem_tracing] Leaked object, created at {loc}, size "
f"{size_bytes}, skipped dealloc at {dealloc_loc}: {ref}\n"
)
else:
output.write(
f"[mem_tracing] Leaked object, created at {loc}, "
f"size {size_bytes}: {ref}\n"
)
output.write("[mem_tracing] ===== End leaked objects =====\n")
output.write("[mem_tracing] ===== Freed objects =====\n")
for ref in self.deallocated:
size_bytes = self.deallocated[ref].get("size_bytes")
loc = self.deallocated[ref].get("loc")
dealloc_loc = self.deallocated[ref].get("dealloc_loc")
output.write(
f"[mem_tracing] Freed object from {loc} at {dealloc_loc}, "
f"size {size_bytes}: {ref}\n"
)
output.write("[mem_tracing] ===== End freed objects =====\n")
output.write(f"[mem_tracing] Peak size bytes {self.peak_mem}\n")
output.write(f"[mem_tracing] Current size bytes {self.cur_mem}\n")
return output.getvalue()
def _get_mem_actor():
return _MemActor.options(
name="mem_tracing_actor", get_if_exists=True, lifetime="detached"
).remote()
| _MemActor |
python | lazyprogrammer__machine_learning_examples | cnn_class/cifar.py | {
"start": 2288,
"end": 3001
} | class ____(object):
def __init__(self, mi, mo, fw=5, fh=5, poolsz=(2, 2)):
# mi = input feature map size
# mo = output feature map size
sz = (mo, mi, fw, fh)
W0 = init_filter(sz, poolsz)
self.W = theano.shared(W0)
b0 = np.zeros(mo, dtype=np.float32)
self.b = theano.shared(b0)
self.poolsz = poolsz
self.params = [self.W, self.b]
def forward(self, X):
conv_out = conv2d(input=X, filters=self.W)
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=self.poolsz,
ignore_border=True
)
return T.nnet.relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
| ConvPoolLayer |
python | pypa__pip | src/pip/_vendor/urllib3/response.py | {
"start": 1570,
"end": 1669
} | class ____(object):
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
| GzipDecoderState |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/scheduler.py | {
"start": 1026,
"end": 1921
} | class ____(
NamedTuple(
"SchedulerDebugInfo",
[
("errors", Sequence[str]),
("scheduler_config_info", str),
("scheduler_info", str),
("schedule_storage", Sequence[str]),
],
)
):
def __new__(
cls,
errors: Sequence[str],
scheduler_config_info: str,
scheduler_info: str,
schedule_storage: Sequence[str],
):
return super().__new__(
cls,
errors=check.sequence_param(errors, "errors", of_type=str),
scheduler_config_info=check.str_param(scheduler_config_info, "scheduler_config_info"),
scheduler_info=check.str_param(scheduler_info, "scheduler_info"),
schedule_storage=check.sequence_param(
schedule_storage, "schedule_storage", of_type=str
),
)
@public
| SchedulerDebugInfo |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/integration_tests/cache.py | {
"start": 3821,
"end": 7503
} | class ____(BaseStandardTests):
"""Test suite for checking the `BaseCache` API of a caching layer for LLMs.
Verifies the basic caching API of a caching layer for LLMs.
The test suite is designed for synchronous caching layers.
Implementers should subclass this test suite and provide a fixture that returns an
empty cache for each test.
"""
@abstractmethod
@pytest.fixture
async def cache(self) -> BaseCache:
"""Get the cache class to test.
The returned cache should be EMPTY.
"""
def get_sample_prompt(self) -> str:
"""Return a sample prompt for testing."""
return "Sample prompt for testing."
def get_sample_llm_string(self) -> str:
"""Return a sample LLM string for testing."""
return "Sample LLM string configuration."
def get_sample_generation(self) -> Generation:
"""Return a sample `Generation` object for testing."""
return Generation(
text="Sample generated text.",
generation_info={"reason": "test"},
)
async def test_cache_is_empty(self, cache: BaseCache) -> None:
"""Test that the cache is empty."""
assert (
await cache.alookup(self.get_sample_prompt(), self.get_sample_llm_string())
is None
)
async def test_update_cache(self, cache: BaseCache) -> None:
"""Test updating the cache."""
prompt = self.get_sample_prompt()
llm_string = self.get_sample_llm_string()
generation = self.get_sample_generation()
await cache.aupdate(prompt, llm_string, [generation])
assert await cache.alookup(prompt, llm_string) == [generation]
async def test_cache_still_empty(self, cache: BaseCache) -> None:
"""Test that the cache is still empty.
This test should follow a test that updates the cache.
This just verifies that the fixture is set up properly to be empty after each
test.
"""
assert (
await cache.alookup(self.get_sample_prompt(), self.get_sample_llm_string())
is None
)
async def test_clear_cache(self, cache: BaseCache) -> None:
"""Test clearing the cache."""
prompt = self.get_sample_prompt()
llm_string = self.get_sample_llm_string()
generation = self.get_sample_generation()
await cache.aupdate(prompt, llm_string, [generation])
await cache.aclear()
assert await cache.alookup(prompt, llm_string) is None
async def test_cache_miss(self, cache: BaseCache) -> None:
"""Test cache miss."""
assert (
await cache.alookup("Nonexistent prompt", self.get_sample_llm_string())
is None
)
async def test_cache_hit(self, cache: BaseCache) -> None:
"""Test cache hit."""
prompt = self.get_sample_prompt()
llm_string = self.get_sample_llm_string()
generation = self.get_sample_generation()
await cache.aupdate(prompt, llm_string, [generation])
assert await cache.alookup(prompt, llm_string) == [generation]
async def test_update_cache_with_multiple_generations(
self,
cache: BaseCache,
) -> None:
"""Test updating the cache with multiple `Generation` objects."""
prompt = self.get_sample_prompt()
llm_string = self.get_sample_llm_string()
generations = [
self.get_sample_generation(),
Generation(text="Another generated text."),
]
await cache.aupdate(prompt, llm_string, generations)
assert await cache.alookup(prompt, llm_string) == generations
| AsyncCacheTestSuite |
python | kamyu104__LeetCode-Solutions | Python/maximum-profit-in-job-scheduling.py | {
"start": 614,
"end": 1172
} | class ____(object):
def jobScheduling(self, startTime, endTime, profit):
"""
:type startTime: List[int]
:type endTime: List[int]
:type profit: List[int]
:rtype: int
"""
min_heap = zip(startTime, endTime, profit)
heapq.heapify(min_heap)
result = 0
while min_heap:
s, e, p = heapq.heappop(min_heap)
if s < e:
heapq.heappush(min_heap, (e, s, result+p))
else:
result = max(result, p)
return result
| Solution |
python | streamlit__streamlit | lib/streamlit/runtime/uploaded_file_manager.py | {
"start": 1129,
"end": 1295
} | class ____(NamedTuple):
"""Information we provide for single file in get_upload_urls."""
file_id: str
upload_url: str
delete_url: str
| UploadFileUrlInfo |
python | mlflow__mlflow | tests/store/artifact/test_databricks_artifact_repo.py | {
"start": 65823,
"end": 68109
} | class ____:
def __init__(self, content: bytes):
self.content = content
def iter_content(self, chunk_size):
yield self.content
def close(self):
pass
def raise_for_status(self):
pass
def __enter__(self):
return self
def __exit__(self, *args):
pass
@pytest.mark.parametrize(
"cred_type",
[
ArtifactCredentialType.AZURE_SAS_URI,
ArtifactCredentialType.AZURE_ADLS_GEN2_SAS_URI,
ArtifactCredentialType.AWS_PRESIGNED_URL,
ArtifactCredentialType.GCP_SIGNED_URL,
],
)
def test_download_trace_data(databricks_artifact_repo_trace, cred_type):
cred_info = ArtifactCredentialInfo(
signed_uri=MOCK_AWS_SIGNED_URI,
type=cred_type,
)
cred = GetCredentialsForTraceDataUpload.Response(credential_info=cred_info)
with (
mock.patch(
f"{DATABRICKS_ARTIFACT_REPOSITORY_RESOURCES}._Trace.call_endpoint",
return_value=cred,
),
mock.patch("requests.Session.request", return_value=MockResponse(b'{"spans": []}')),
):
trace_data = databricks_artifact_repo_trace.download_trace_data()
assert TraceData.from_dict(trace_data) == TraceData(spans=[])
@pytest.mark.parametrize(
"cred_type",
[
ArtifactCredentialType.AZURE_SAS_URI,
ArtifactCredentialType.AZURE_ADLS_GEN2_SAS_URI,
ArtifactCredentialType.AWS_PRESIGNED_URL,
ArtifactCredentialType.GCP_SIGNED_URL,
],
)
def test_upload_trace_data(databricks_artifact_repo_trace, cred_type):
cred_info = ArtifactCredentialInfo(signed_uri=MOCK_AWS_SIGNED_URI, type=cred_type)
with (
mock.patch(
f"{DATABRICKS_ARTIFACT_REPOSITORY_RESOURCES}._Trace.get_credentials",
return_value=([cred_info], None),
),
mock.patch("requests.Session.request", return_value=MockResponse(b"{}")),
mock.patch.object(databricks_artifact_repo_trace, "chunk_thread_pool") as mock_thread_pool,
):
trace_data = json.dumps({"spans": [], "request": None, "response": None})
databricks_artifact_repo_trace.upload_trace_data(trace_data)
# Verify that threading is not used in upload_trace_data
mock_thread_pool.submit.assert_not_called()
| MockResponse |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_dags.py | {
"start": 1732,
"end": 15630
} | class ____(TestPublicDagEndpoint):
@pytest.fixture(autouse=True)
@provide_session
def setup_dag_runs(self, session=None) -> None:
# Create DAG Runs
for dag_id in [DAG1_ID, DAG2_ID, DAG3_ID, DAG4_ID, DAG5_ID]:
dag_runs_count = 5 if dag_id in [DAG1_ID, DAG2_ID] else 2
for i in range(dag_runs_count):
start_date = pendulum.datetime(2021 + i, 1, 1, 0, 0, 0, tz="UTC")
dag_run = DagRun(
dag_id=dag_id,
run_id=f"run_id_{i + 1}",
run_type=DagRunType.MANUAL,
start_date=start_date,
logical_date=start_date,
run_after=start_date,
state=(DagRunState.FAILED if i % 2 == 0 else DagRunState.SUCCESS),
triggered_by=DagRunTriggeredByType.TEST,
)
if dag_run.start_date is not None:
dag_run.end_date = dag_run.start_date + pendulum.duration(hours=1)
session.add(dag_run)
session.commit()
@pytest.mark.parametrize(
("query_params", "expected_ids", "expected_total_dag_runs"),
[
# Filters
({}, [DAG1_ID, DAG2_ID], 11),
({"limit": 1}, [DAG1_ID], 2),
({"offset": 1}, [DAG1_ID, DAG2_ID], 11),
({"tags": ["example"]}, [DAG1_ID], 6),
({"exclude_stale": False}, [DAG1_ID, DAG2_ID, DAG3_ID], 15),
({"paused": True, "exclude_stale": False}, [DAG3_ID], 4),
({"paused": False}, [DAG1_ID, DAG2_ID], 11),
({"owners": ["airflow"]}, [DAG1_ID, DAG2_ID], 11),
({"owners": ["test_owner"], "exclude_stale": False}, [DAG3_ID], 4),
({"dag_ids": [DAG1_ID]}, [DAG1_ID], 6),
({"dag_ids": [DAG1_ID, DAG2_ID]}, [DAG1_ID, DAG2_ID], 11),
({"last_dag_run_state": "success", "exclude_stale": False}, [DAG1_ID, DAG2_ID, DAG3_ID], 6),
({"last_dag_run_state": "failed", "exclude_stale": False}, [DAG1_ID, DAG2_ID, DAG3_ID], 9),
# Search
({"dag_id_pattern": "1"}, [DAG1_ID], 6),
({"dag_display_name_pattern": "test_dag2"}, [DAG2_ID], 5),
# Bundle filters
({"bundle_name": "dag_maker"}, [DAG1_ID, DAG2_ID], 11),
({"bundle_name": "wrong_bundle"}, [], 0),
({"bundle_version": "some_commit_hash"}, [DAG1_ID, DAG2_ID], 11),
({"bundle_version": "wrong_version"}, [], 0),
({"bundle_name": "dag_maker", "bundle_version": "some_commit_hash"}, [DAG1_ID, DAG2_ID], 11),
({"bundle_name": "dag_maker", "bundle_version": "wrong_version"}, [], 0),
({"bundle_name": "wrong_bundle", "bundle_version": "some_commit_hash"}, [], 0),
],
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_should_return_200(self, test_client, query_params, expected_ids, expected_total_dag_runs):
response = test_client.get("/dags", params=query_params)
assert response.status_code == 200
body = response.json()
required_dag_run_key = [
"dag_id",
"run_id",
"state",
"run_after",
"start_date",
"end_date",
"logical_date",
]
for recent_dag_runs in body["dags"]:
dag_runs = recent_dag_runs["latest_dag_runs"]
# check date ordering
previous_run_after = None
for dag_run in dag_runs:
# validate the response
for key in required_dag_run_key:
assert key in dag_run
if previous_run_after:
assert previous_run_after > dag_run["run_after"]
previous_run_after = dag_run["run_after"]
@pytest.fixture
def setup_hitl_data(self, create_task_instance: TaskInstance, session: Session):
"""Setup HITL test data for parametrized tests."""
# 3 Dags (test_dag0 created here and test_dag1, test_dag2 created in setup_dag_runs)
# 5 task instances in test_dag0
TI_COUNT = 5
tis = [
create_task_instance(
dag_id="test_dag0",
run_id=f"hitl_run_{ti_i}",
task_id=f"test_task_{ti_i}",
session=session,
state=TaskInstanceState.DEFERRED,
)
for ti_i in range(TI_COUNT)
]
session.add_all(tis)
session.commit()
# test_dag_0 has 3 HITL details not responded, 2 already responded
# test_dag_0 has 3 HITL details that have not been responded to, while 2 have already received responses.
hitl_detail_models = [
HITLDetail(
ti_id=tis[i].id,
options=["Approve", "Reject"],
subject=f"This is subject {i}",
defaults=["Approve"],
)
for i in range(3)
] + [
HITLDetail(
ti_id=tis[i].id,
options=["Approve", "Reject"],
subject=f"This is subject {i}",
defaults=["Approve"],
responded_at=utcnow(),
chosen_options=["Approve"],
responded_by={"id": "test", "name": "test"},
)
for i in range(3, 5)
]
session.add_all(hitl_detail_models)
session.commit()
@pytest.mark.parametrize(
("has_pending_actions", "expected_total_entries", "expected_pending_actions"),
[
# Without has_pending_actions param, should query all DAGs
(None, 3, None),
# With has_pending_actions=True, should only query DAGs with pending actions
(
True,
1,
[
{
"task_instance": mock.ANY,
"options": ["Approve", "Reject"],
"subject": f"This is subject {i}",
"defaults": ["Approve"],
"multiple": False,
"params": {},
"params_input": {},
"response_received": False,
"assigned_users": [],
"created_at": mock.ANY,
}
for i in range(3)
],
),
],
)
def test_should_return_200_with_hitl(
self,
test_client: TestClient,
setup_hitl_data,
has_pending_actions,
expected_total_entries,
expected_pending_actions,
):
# Build query params
params = {}
if has_pending_actions is not None:
params["has_pending_actions"] = has_pending_actions
# Make request
response = test_client.get("/dags", params=params)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == expected_total_entries
# Check pending_actions structure when specified
if expected_pending_actions is not None:
for dag_json in body["dags"]:
pending_actions = dag_json["pending_actions"]
pending_actions.sort(key=lambda x: x["subject"])
assert pending_actions == expected_pending_actions
def test_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/dags", params={})
assert response.status_code == 401
def test_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/dags", params={})
assert response.status_code == 403
def test_get_dags_no_n_plus_one_queries(self, session, test_client):
"""Test that fetching DAGs with tags doesn't trigger n+1 queries."""
num_dags = 5
for i in range(num_dags):
dag_id = f"test_dag_queries_ui_{i}"
dag_model = DagModel(
dag_id=dag_id,
bundle_name="dag_maker",
fileloc=f"/tmp/{dag_id}.py",
is_stale=False,
)
session.add(dag_model)
session.flush()
for j in range(3):
tag = DagTag(name=f"tag_ui_{i}_{j}", dag_id=dag_id)
session.add(tag)
session.commit()
session.expire_all()
with count_queries() as result:
response = test_client.get("/dags", params={"limit": 10})
assert response.status_code == 200
body = response.json()
dags_with_our_prefix = [d for d in body["dags"] if d["dag_id"].startswith("test_dag_queries_ui_")]
assert len(dags_with_our_prefix) == num_dags
for dag in dags_with_our_prefix:
assert len(dag["tags"]) == 3
first_query_count = sum(result.values())
# Add more DAGs and verify query count doesn't scale linearly
for i in range(num_dags, num_dags + 3):
dag_id = f"test_dag_queries_ui_{i}"
dag_model = DagModel(
dag_id=dag_id,
bundle_name="dag_maker",
fileloc=f"/tmp/{dag_id}.py",
is_stale=False,
)
session.add(dag_model)
session.flush()
for j in range(3):
tag = DagTag(name=f"tag_ui_{i}_{j}", dag_id=dag_id)
session.add(tag)
session.commit()
session.expire_all()
with count_queries() as result2:
response = test_client.get("/dags", params={"limit": 15})
assert response.status_code == 200
second_query_count = sum(result2.values())
# With n+1, adding 3 DAGs would add ~3 tag queries
# Without n+1, query count should remain nearly identical
assert second_query_count - first_query_count < 3, (
f"Added 3 DAGs but query count increased by {second_query_count - first_query_count} "
f"({first_query_count} → {second_query_count}), suggesting n+1 queries for tags"
)
@pytest.mark.usefixtures("configure_git_connection_for_dag_bundle")
def test_latest_run_should_return_200(self, test_client):
response = test_client.get(f"/dags/{DAG1_ID}/latest_run")
assert response.status_code == 200
body = response.json()
assert body == {
"id": mock.ANY,
"dag_id": "test_dag1",
"run_id": "run_id_5",
"logical_date": "2025-01-01T00:00:00Z",
"run_after": "2025-01-01T00:00:00Z",
"start_date": "2025-01-01T00:00:00Z",
"end_date": "2025-01-01T01:00:00Z",
"state": "failed",
"duration": 3600.0,
}
def test_latest_run_should_response_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(f"/dags/{DAG1_ID}/latest_run")
assert response.status_code == 401
def test_latest_run_should_response_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(f"/dags/{DAG1_ID}/latest_run")
assert response.status_code == 403
@pytest.mark.parametrize(
("query_params", "expected_dag_count"),
[
({"has_asset_schedule": True}, 3),
({"has_asset_schedule": False}, 2),
({"asset_dependency": "test_asset"}, 1),
({"asset_dependency": "dataset"}, 1),
({"asset_dependency": "bucket"}, 1),
({"asset_dependency": "s3://"}, 1),
({"asset_dependency": "nonexistent"}, 0),
({"has_asset_schedule": True, "asset_dependency": "test_asset"}, 1), # Combined filters
({"has_asset_schedule": False, "asset_dependency": "test_asset"}, 0), # No match
],
)
def test_asset_filtering(self, test_client, query_params, expected_dag_count, session):
"""Test asset-based filtering on the UI DAGs endpoint."""
self._create_asset_test_data(session)
response = test_client.get("/dags", params=query_params)
assert response.status_code == 200
body = response.json()
assert body["total_entries"] == expected_dag_count
assert len(body["dags"]) == expected_dag_count
def test_is_favorite_field_with_multiple_favorites(self, test_client, session):
"""Test is_favorite field with multiple DAGs marked as favorites."""
# Mark both DAG1 and DAG2 as favorites
session.add(DagFavorite(user_id="test", dag_id=DAG1_ID))
session.add(DagFavorite(user_id="test", dag_id=DAG2_ID))
session.commit()
response = test_client.get("/dags")
assert response.status_code == 200
body = response.json()
# Count favorites in response
favorite_count = sum(1 for dag in body["dags"] if dag["is_favorite"])
assert favorite_count == 2
# Verify specific DAGs are marked as favorites
dag_favorites = {dag["dag_id"]: dag["is_favorite"] for dag in body["dags"]}
assert dag_favorites[DAG1_ID] is True
assert dag_favorites[DAG2_ID] is True
def test_is_favorite_field_user_specific(self, test_client, session):
"""Test that is_favorite field is user-specific."""
# Mark DAG1 as favorite for a different user
session.add(DagFavorite(user_id="other_user", dag_id=DAG1_ID))
session.commit()
# Request as the test user (not other_user)
response = test_client.get("/dags")
assert response.status_code == 200
body = response.json()
# Verify that DAG1 is not marked as favorite for the test user
dag1_data = next(dag for dag in body["dags"] if dag["dag_id"] == DAG1_ID)
assert dag1_data["is_favorite"] is False
| TestGetDagRuns |
python | Pylons__pyramid | src/pyramid/events.py | {
"start": 3671,
"end": 4093
} | class ____:
"""An instance of this class is emitted as an :term:`event`
whenever :app:`Pyramid` begins to process a new request. The
event instance has an attribute, ``request``, which is a
:term:`request` object. This event class implements the
:class:`pyramid.interfaces.INewRequest` interface."""
def __init__(self, request):
self.request = request
@implementer(INewResponse)
| NewRequest |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 35992,
"end": 36082
} | class ____(SettingsOverrideObject):
_default_class = TranslationBaseForm
| TranslationForm |
python | django__django | tests/inspectdb/models.py | {
"start": 3642,
"end": 3833
} | class ____(models.Model):
text_field = models.TextField(db_collation=test_collation)
class Meta:
required_db_features = {"supports_collation_on_textfield"}
| TextFieldDbCollation |
python | streamlit__streamlit | lib/tests/streamlit/form_test.py | {
"start": 18145,
"end": 23660
} | class ____(DeltaGeneratorTestCase):
"""Test form width and height."""
def test_form_with_stretch_width(self):
"""Test form with width='stretch'."""
with st.form("form_with_stretch", width="stretch"):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_stretch"
assert form_proto.width_config.use_stretch
def test_form_with_content_width(self):
"""Test form with width='content'."""
with st.form("form_with_content", width="content"):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_content"
assert form_proto.width_config.use_content
def test_form_with_pixel_width(self):
"""Test form with pixel width."""
with st.form("form_with_pixel", width=100):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_pixel"
assert form_proto.width_config.pixel_width == 100
def test_form_with_pixel_height(self):
"""Test form with pixel height."""
with st.form("form_with_pixel", height=100):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_pixel"
assert form_proto.height_config.pixel_height == 100
def test_form_with_content_height(self):
"""Test form with content height."""
with st.form("form_with_content", height="content"):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_content"
assert form_proto.height_config.use_content
def test_form_with_stretch_height(self):
"""Test form with stretch height."""
with st.form("form_with_stretch", height="stretch"):
st.text_input("Input")
st.form_submit_button("Submit")
form_proto = self.get_delta_from_queue(0).add_block
assert form_proto.form.form_id == "form_with_stretch"
assert form_proto.height_config.use_stretch
@parameterized.expand(
[
("invalid", "invalid"),
("negative", -100),
("zero", 0),
("none", None),
("empty_string", ""),
]
)
def test_form_with_invalid_width_and_height(self, name, value):
"""Test form with invalid width values."""
with pytest.raises(StreamlitAPIException):
with st.form(f"form_with_invalid_{name}", width=value):
st.text_input("Input")
st.form_submit_button("Submit")
with pytest.raises(StreamlitAPIException):
with st.form(f"form_with_invalid_{name}", height=value):
st.text_input("Input")
st.form_submit_button("Submit")
# Tests for st.form_submit_button width
def test_form_submit_button_with_content_width(self):
"""Test st.form_submit_button with width='content'."""
with st.form("test_form"):
st.form_submit_button("Submit", width="content")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_form_submit_button_with_stretch_width(self):
"""Test st.form_submit_button with width='stretch'."""
with st.form("test_form"):
st.form_submit_button("Submit", width="stretch")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert el.width_config.use_stretch is True
def test_form_submit_button_with_pixel_width(self):
"""Test st.form_submit_button with pixel width."""
with st.form("test_form"):
st.form_submit_button("Submit", width=250)
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 250
def test_form_submit_button_with_default_width(self):
"""Test st.form_submit_button uses content width by default."""
with st.form("test_form"):
st.form_submit_button("Submit")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
@parameterized.expand(
[
"invalid",
-100,
0,
100.5,
None,
]
)
def test_form_submit_button_with_invalid_width(self, value):
"""Test st.form_submit_button with invalid width values."""
with pytest.raises(StreamlitAPIException):
with st.form("test_form"):
st.form_submit_button("Submit", width=value)
| FormDimensionsTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 448063,
"end": 465170
} | class ____(VegaLiteSchema):
"""
Header schema wrapper.
Headers of row / column channels for faceted plots.
Parameters
----------
format : str, dict, :class:`Dict`, :class:`Format`, :class:`TimeFormatSpecifier`
The text format specifier for formatting number and date/time in labels of guides
(axes, legends, headers) and text marks.
If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's
`number format pattern string <https://github.com/d3/d3-format#locale_format>`__.
If the format type is ``"time"`` (e.g., for temporal fields), this is either: a)
D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you
desire to set a static time format.
b) `dynamic time format specifier object
<https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you
desire to set a dynamic time format that uses different formats depending on the
granularity of the input date (e.g., if the date lies on a year, month, date, hour,
etc. boundary).
When used with a `custom formatType
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this
value will be passed as ``format`` alongside ``datum.value`` to the registered
function.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : str
The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom
format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__.
**Default value:**
* ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
* ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
labelAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
Horizontal text alignment of header labels. One of ``"left"``, ``"center"``, or
``"right"``.
labelAnchor : :class:`TitleAnchor`, Literal[None, 'start', 'middle', 'end']
The anchor position for placing the labels. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with a label orientation of top these anchor positions map
to a left-, center-, or right-aligned label.
labelAngle : float
The rotation angle of the header labels.
**Default value:** ``0`` for column header, ``-90`` for row header.
labelBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
The vertical text baseline for the header labels. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
labelColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
The color of the header label, can be in hex color code or regular color name.
labelExpr : str
`Vega expression <https://vega.github.io/vega/docs/expressions/>`__ for customizing
labels.
**Note:** The label text and value can be assessed via the ``label`` and ``value``
properties of the header's backing ``datum`` object.
labelFont : str, dict, :class:`ExprRef`
The font of the header label.
labelFontSize : dict, float, :class:`ExprRef`
The font size of the header label, in pixels.
labelFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of the header label.
labelFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight of the header label.
labelLimit : dict, float, :class:`ExprRef`
The maximum length of the header label in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
labelLineHeight : dict, float, :class:`ExprRef`
Line height in pixels for multi-line header labels or title text with ``"line-top"``
or ``"line-bottom"`` baseline.
labelOrient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
The orientation of the header label. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
labelPadding : dict, float, :class:`ExprRef`
The padding, in pixel, between facet header's label and the plot.
**Default value:** ``10``
labels : bool
A boolean flag indicating if labels should be included as part of the header.
**Default value:** ``true``.
orient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
Shortcut for setting both labelOrient and titleOrient.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
titleAlign : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
Horizontal text alignment (to the anchor) of header titles.
titleAnchor : :class:`TitleAnchor`, Literal[None, 'start', 'middle', 'end']
The anchor position for placing the title. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with an orientation of top these anchor positions map to a
left-, center-, or right-aligned title.
titleAngle : float
The rotation angle of the header title.
**Default value:** ``0``.
titleBaseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
The vertical text baseline for the header title. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
**Default value:** ``"middle"``
titleColor : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
Color of the header title, can be in hex color code or regular color name.
titleFont : str, dict, :class:`ExprRef`
Font of the header title. (e.g., ``"Helvetica Neue"``).
titleFontSize : dict, float, :class:`ExprRef`
Font size of the header title.
titleFontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style of the header title.
titleFontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
Font weight of the header title. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
titleLimit : dict, float, :class:`ExprRef`
The maximum length of the header title in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
titleLineHeight : dict, float, :class:`ExprRef`
Line height in pixels for multi-line header title text or title text with
``"line-top"`` or ``"line-bottom"`` baseline.
titleOrient : :class:`Orient`, Literal['left', 'right', 'top', 'bottom']
The orientation of the header title. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
titlePadding : dict, float, :class:`ExprRef`
The padding, in pixel, between facet header's title and the label.
**Default value:** ``10``
"""
_schema = {"$ref": "#/definitions/Header"}
def __init__(
self,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelAnchor: Optional[SchemaBase | TitleAnchor_T] = Undefined,
labelAngle: Optional[float] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOrient: Optional[SchemaBase | Orient_T] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labels: Optional[bool] = Undefined,
orient: Optional[SchemaBase | Orient_T] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[SchemaBase | TitleAnchor_T] = Undefined,
titleAngle: Optional[float] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[SchemaBase | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
format=format,
formatType=formatType,
labelAlign=labelAlign,
labelAnchor=labelAnchor,
labelAngle=labelAngle,
labelBaseline=labelBaseline,
labelColor=labelColor,
labelExpr=labelExpr,
labelFont=labelFont,
labelFontSize=labelFontSize,
labelFontStyle=labelFontStyle,
labelFontWeight=labelFontWeight,
labelLimit=labelLimit,
labelLineHeight=labelLineHeight,
labelOrient=labelOrient,
labelPadding=labelPadding,
labels=labels,
orient=orient,
title=title,
titleAlign=titleAlign,
titleAnchor=titleAnchor,
titleAngle=titleAngle,
titleBaseline=titleBaseline,
titleColor=titleColor,
titleFont=titleFont,
titleFontSize=titleFontSize,
titleFontStyle=titleFontStyle,
titleFontWeight=titleFontWeight,
titleLimit=titleLimit,
titleLineHeight=titleLineHeight,
titleOrient=titleOrient,
titlePadding=titlePadding,
**kwds,
)
| Header |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 107888,
"end": 108676
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, start_date: str, access_token: str):
"""Airbyte Source for Intercom.
Documentation can be found at https://docs.airbyte.com/integrations/sources/intercom
Args:
name (str): The name of the destination.
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
access_token (str): Access token for making authenticated requests. See the Intercom docs for more information.
"""
self.start_date = check.str_param(start_date, "start_date")
self.access_token = check.str_param(access_token, "access_token")
super().__init__("Intercom", name)
| IntercomSource |
python | sanic-org__sanic | guide/webapp/display/layouts/models.py | {
"start": 260,
"end": 329
} | class ____(Struct, kw_only=False):
current_version: str
| GeneralConfig |
python | graphql-python__graphene | graphene/types/union.py | {
"start": 274,
"end": 360
} | class ____(BaseOptions):
types = () # type: Iterable[Type[ObjectType]]
| UnionOptions |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 11611,
"end": 13005
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
cluster_id: Optional[str] = Field(
default=None,
description=(
"The canonical identifier for the cluster used by a run. This field is"
" always available for runs on existing clusters. For runs on new clusters,"
" it becomes available once the cluster is created. This value can be used"
" to view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`."
" The logs continue to be available after the run completes.\n\nThe"
" response won’t include this field if the identifier is not available yet."
),
examples=["0923-164208-meows279"],
)
spark_context_id: Optional[str] = Field(
default=None,
description=(
"The canonical identifier for the Spark context used by a run. This field"
" is filled in once the run begins execution. This value can be used to"
" view the Spark UI by browsing to"
" `/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues"
" to be available after the run has completed.\n\nThe response won’t"
" include this field if the identifier is not available yet."
),
)
| ClusterInstance |
python | google__python-fire | fire/test_components_py3.py | {
"start": 1222,
"end": 1405
} | class ____:
@functools.lru_cache()
def lru_cache_in_class(self, arg1):
return arg1
@functools.lru_cache()
def lru_cache_decorated(arg1):
return arg1
| LruCacheDecoratedMethod |
python | tensorflow__tensorflow | tensorflow/python/client/virtual_gpu_test.py | {
"start": 1283,
"end": 7484
} | class ____(object):
def __init__(self,
dim=1000,
num_ops=100,
virtual_devices_per_gpu=None,
device_probabilities=None):
self._dim = dim
self._num_ops = num_ops
if virtual_devices_per_gpu is None:
self._virtual_devices_per_gpu = [3]
else:
self._virtual_devices_per_gpu = virtual_devices_per_gpu
self._visible_device_list = [
i for i in range(len(self._virtual_devices_per_gpu))
]
gpu_devices = [
('/gpu:' + str(i)) for i in range(sum(self._virtual_devices_per_gpu))
]
self.devices = ['/cpu:0'] + gpu_devices
self._num_devices = len(self.devices)
# Each virtual device gets 2GB memory.
self._mem_limits_mb = [
([1 << 11] * i) for i in self._virtual_devices_per_gpu
]
self.config = self._GetSessionConfig()
if device_probabilities is not None:
self._device_probabilities = list(device_probabilities) # Deep copy
for i in range(1, self._num_devices):
self._device_probabilities[i] += self._device_probabilities[i - 1]
else:
# Each device gets same probability to be assigned an operation.
step = 1.0 / self._num_devices
self._device_probabilities = [
(x + 1) * step for x in range(self._num_devices)
]
# To prevent rounding error causing problems.
self._device_probabilities[self._num_devices - 1] = 1.1
logging.info('dim: %d', self._dim)
logging.info('num_ops: %d', self._num_ops)
logging.info('visible_device_list: %s', str(self._visible_device_list))
logging.info('virtual_devices_per_gpu: %s',
str(self._virtual_devices_per_gpu))
logging.info('mem_limits: %s', str(self._mem_limits_mb))
logging.info('devices: %s', str(self.devices))
logging.info('config: %s', text_format.MessageToString(self.config))
logging.info('device_probabilities: %s', str(self._device_probabilities))
# Creates virtual GPU devices
def _GetSessionConfig(self):
virtual_device_gpu_options = config_pb2.GPUOptions(
visible_device_list=','.join(str(d) for d in self._visible_device_list),
experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
config_pb2.GPUOptions.Experimental.VirtualDevices(
memory_limit_mb=i) for i in self._mem_limits_mb
]))
return config_pb2.ConfigProto(gpu_options=virtual_device_gpu_options)
# Generates a list of 3-tuples, each tuple contains the source and destination
# device index for a binary operation like 'add', like:
# (src_device_1, src_device_2, dst_device)
def _GenerateOperationPlacement(self):
result = []
for unused_i in range(self._num_ops):
op_device = ()
for unused_j in range(3):
random_num = random.random()
for device_index in range(self._num_devices):
if self._device_probabilities[device_index] > random_num:
op_device += (device_index,)
break
result.append(op_device)
return result
# Logs part of the matrix for debugging purposes.
def _LogMatrix(self, mat, dim):
logging.info('---- printing the first 10*10 submatrix ----')
for i in range(min(10, dim)):
row = ''
for j in range(min(10, dim)):
row += ' ' + str(mat[i][j])
logging.info(row)
# Runs a list of 'add' operations where each operation satisfies the device
# placement constraints in `op_placement`, and returns the result.
def _TestRandomGraphWithDevices(self,
sess,
seed,
op_placement,
devices,
debug_mode=False):
data = []
shape = (self._dim, self._dim)
feed_dict = {}
# Initialize the matrices
for i in range(len(devices)):
with ops.device(devices[i]):
var = array_ops.placeholder(dtypes.float32, shape=shape)
np.random.seed(seed + i)
feed_dict[var] = np.random.uniform(
low=0, high=0.1, size=shape).astype(np.float32)
data.append(var)
# Run the 'add' operations on those matrices
for op in op_placement:
with ops.device(devices[op[2]]):
data[op[2]] = math_ops.add(data[op[0]], data[op[1]])
with ops.device('/cpu:0'):
s = data[0]
for i in range(1, len(data)):
s = math_ops.add(s, data[i])
if debug_mode:
logging.info(ops.get_default_graph().as_graph_def())
result = sess.run(s, feed_dict=feed_dict)
self._LogMatrix(result, self._dim)
return result
# Generates a random graph with `self._num_ops` 'add' operations with each
# operation placed on different virtual device, test that the result is
# identical to the result obtained by running the same graph on cpu only.
def TestRandomGraph(self, sess, op_placement=None, random_seed=None):
debug_mode = False
if op_placement is None:
op_placement = self._GenerateOperationPlacement()
else:
debug_mode = True
if random_seed is None:
random_seed = random.randint(0, 1 << 31)
else:
debug_mode = True
logging.info('Virtual gpu functional test for random graph...')
logging.info('operation placement: %s', str(op_placement))
logging.info('random seed: %d', random_seed)
# Run with multiple virtual gpus.
result_vgd = self._TestRandomGraphWithDevices(
sess, random_seed, op_placement, self.devices, debug_mode=debug_mode)
# Run with single cpu.
result_cpu = self._TestRandomGraphWithDevices(
sess,
random_seed,
op_placement, ['/cpu:0'] * self._num_devices,
debug_mode=debug_mode)
# Test the result
for i in range(self._dim):
for j in range(self._dim):
if result_vgd[i][j] != result_cpu[i][j]:
logging.error(
'Result mismatch at row %d column %d: expected %f, actual %f', i,
j, result_cpu[i][j], result_vgd[i][j])
logging.error('Devices: %s', self.devices)
logging.error('Memory limits (in MB): %s', self._mem_limits_mb)
return False
return True
| VirtualGpuTestUtil |
python | graphql-python__graphene | graphene/validation/disable_introspection.py | {
"start": 183,
"end": 539
} | class ____(ValidationRule):
def enter_field(self, node: FieldNode, *_args):
field_name = node.name.value
if is_introspection_key(field_name):
self.report_error(
GraphQLError(
f"Cannot query '{field_name}': introspection is disabled.", node
)
)
| DisableIntrospection |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass9.py | {
"start": 193,
"end": 340
} | class ____(ABC):
@property
@abstractmethod
def myproperty(self) -> str: ...
MixinB = NamedTuple("MixinB", [("myproperty", str)])
| ClassA |
python | facebook__pyre-check | client/commands/query_response.py | {
"start": 664,
"end": 1517
} | class ____:
payload: object
@staticmethod
def from_json(
response_json: object,
) -> Response:
if (
isinstance(response_json, list)
and len(response_json) > 1
and response_json[0] == "Query"
):
return Response(response_json[1])
else:
raise InvalidQueryResponse(
f"Unexpected JSON response from server: {response_json}"
)
@staticmethod
def parse(
response_text: str,
) -> Response:
try:
response_json = json.loads(response_text)
return Response.from_json(response_json)
except json.JSONDecodeError as decode_error:
message = f"Cannot parse response as JSON: {decode_error}"
raise InvalidQueryResponse(message) from decode_error
| Response |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-zyte-serp/llama_index/readers/zyte_serp/base.py | {
"start": 235,
"end": 2249
} | class ____(BasePydanticReader):
"""
Get google search results URLs for a search query.
Args:
api_key: Zyte API key.
extract_from: Determines the mode while extracting the search results.
It can take one of the following values: 'httpResponseBody', 'browserHtml'
Example:
.. code-block:: python
from llama_index.readers.zyte_serp import ZyteSerpReader
reader = ZyteSerpReader(
api_key="ZYTE_API_KEY",
)
docs = reader.load_data(
"search query",
)
Zyte-API reference:
https://docs.zyte.com/zyte-api/get-started.html
"""
client: ZyteAPI
api_key: str
extract_from: Optional[str]
def __init__(
self,
api_key: str,
extract_from: Optional[str] = None,
) -> None:
"""Initialize with file path."""
user_agent = f"llama-index-zyte-api/{PYTHON_ZYTE_API_USER_AGENT}"
client = ZyteAPI(
api_key=api_key,
user_agent=user_agent,
)
super().__init__(
api_key=api_key,
extract_from=extract_from,
client=client,
)
def _serp_url(self, query: str):
from urllib.parse import quote_plus
base_url = "https://www.google.com/search?q="
return base_url + quote_plus(query)
def load_data(self, query: str):
serp_url = self._serp_url(query)
serp_request = {
"url": serp_url,
"serp": True,
}
if self.extract_from:
serp_request.update({"serpOptions": {"extractFrom": self.extract_from}})
results = self.client.get(serp_request)
docs = []
for result in results["serp"]["organicResults"]:
doc = Document(
text=result["url"],
metadata={"name": result["name"], "rank": result["rank"]},
)
docs.append(doc)
return docs
| ZyteSerpReader |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 10811,
"end": 11009
} | class ____(collections.abc.Callable[P, ...]):
def __new__(cls: type[Generic4]) -> Generic4: ...
def __enter__(self: Generic4) -> Generic4: ...
from some_module import PotentialTypeVar
| Generic4 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/data_loss_prevention.py | {
"start": 4801,
"end": 5057
} | class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Data Loss Prevention link."""
name = "Cloud DLP Info Type Details"
key = "cloud_dlp_info_type_details_key"
format_str = DLP_INFO_TYPE_DETAILS_LINK
| CloudDLPInfoTypeDetailsLink |
python | doocs__leetcode | solution/2200-2299/2286.Booking Concert Tickets in Groups/Solution.py | {
"start": 132,
"end": 1793
} | class ____:
def __init__(self, n, m):
self.m = m
self.tr = [Node() for _ in range(n << 2)]
self.build(1, 1, n)
def build(self, u, l, r):
self.tr[u].l, self.tr[u].r = l, r
if l == r:
self.tr[u].s = self.tr[u].mx = self.m
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
self.pushup(u)
def modify(self, u, x, v):
if self.tr[u].l == x and self.tr[u].r == x:
self.tr[u].s = self.tr[u].mx = v
return
mid = (self.tr[u].l + self.tr[u].r) >> 1
if x <= mid:
self.modify(u << 1, x, v)
else:
self.modify(u << 1 | 1, x, v)
self.pushup(u)
def query_sum(self, u, l, r):
if self.tr[u].l >= l and self.tr[u].r <= r:
return self.tr[u].s
mid = (self.tr[u].l + self.tr[u].r) >> 1
v = 0
if l <= mid:
v += self.query_sum(u << 1, l, r)
if r > mid:
v += self.query_sum(u << 1 | 1, l, r)
return v
def query_idx(self, u, l, r, k):
if self.tr[u].mx < k:
return 0
if self.tr[u].l == self.tr[u].r:
return self.tr[u].l
mid = (self.tr[u].l + self.tr[u].r) >> 1
if self.tr[u << 1].mx >= k:
return self.query_idx(u << 1, l, r, k)
if r > mid:
return self.query_idx(u << 1 | 1, l, r, k)
return 0
def pushup(self, u):
self.tr[u].s = self.tr[u << 1].s + self.tr[u << 1 | 1].s
self.tr[u].mx = max(self.tr[u << 1].mx, self.tr[u << 1 | 1].mx)
| SegmentTree |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 12722,
"end": 12802
} | class ____(PydanticValueError):
msg_template = 'invalid date format'
| DateError |
python | davidhalter__jedi | jedi/inference/lazy_value.py | {
"start": 632,
"end": 803
} | class ____(AbstractLazyValue):
def __init__(self, min=1, max=1):
super().__init__(None, min, max)
def infer(self):
return NO_VALUES
| LazyUnknownValue |
python | walkccc__LeetCode | solutions/2256. Minimum Average Difference/2256.py | {
"start": 0,
"end": 457
} | class ____:
def minimumAverageDifference(self, nums: list[int]) -> int:
n = len(nums)
ans = 0
minDiff = inf
prefix = 0
suffix = sum(nums)
for i, num in enumerate(nums):
prefix += num
suffix -= num
prefixAvg = prefix // (i + 1)
suffixAvg = 0 if i == n - 1 else suffix // (n - 1 - i)
diff = abs(prefixAvg - suffixAvg)
if diff < minDiff:
ans = i
minDiff = diff
return ans
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_request_allocation_result.py | {
"start": 383,
"end": 17712
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admin_access': 'bool',
'binding_conditions': 'list[str]',
'binding_failure_conditions': 'list[str]',
'consumed_capacity': 'dict(str, str)',
'device': 'str',
'driver': 'str',
'pool': 'str',
'request': 'str',
'share_id': 'str',
'tolerations': 'list[V1beta2DeviceToleration]'
}
attribute_map = {
'admin_access': 'adminAccess',
'binding_conditions': 'bindingConditions',
'binding_failure_conditions': 'bindingFailureConditions',
'consumed_capacity': 'consumedCapacity',
'device': 'device',
'driver': 'driver',
'pool': 'pool',
'request': 'request',
'share_id': 'shareID',
'tolerations': 'tolerations'
}
def __init__(self, admin_access=None, binding_conditions=None, binding_failure_conditions=None, consumed_capacity=None, device=None, driver=None, pool=None, request=None, share_id=None, tolerations=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceRequestAllocationResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admin_access = None
self._binding_conditions = None
self._binding_failure_conditions = None
self._consumed_capacity = None
self._device = None
self._driver = None
self._pool = None
self._request = None
self._share_id = None
self._tolerations = None
self.discriminator = None
if admin_access is not None:
self.admin_access = admin_access
if binding_conditions is not None:
self.binding_conditions = binding_conditions
if binding_failure_conditions is not None:
self.binding_failure_conditions = binding_failure_conditions
if consumed_capacity is not None:
self.consumed_capacity = consumed_capacity
self.device = device
self.driver = driver
self.pool = pool
self.request = request
if share_id is not None:
self.share_id = share_id
if tolerations is not None:
self.tolerations = tolerations
    @property
    def admin_access(self):
        """Gets the admin_access of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.  # noqa: E501

        :return: The admin_access of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: bool
        """
        return self._admin_access

    @admin_access.setter
    def admin_access(self, admin_access):
        """Sets the admin_access of this V1beta2DeviceRequestAllocationResult.

        AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.  # noqa: E501

        :param admin_access: The admin_access of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: bool
        """
        self._admin_access = admin_access
    @property
    def binding_conditions(self):
        """Gets the binding_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        BindingConditions contains a copy of the BindingConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.  # noqa: E501

        :return: The binding_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: list[str]
        """
        return self._binding_conditions

    @binding_conditions.setter
    def binding_conditions(self, binding_conditions):
        """Sets the binding_conditions of this V1beta2DeviceRequestAllocationResult.

        BindingConditions contains a copy of the BindingConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.  # noqa: E501

        :param binding_conditions: The binding_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: list[str]
        """
        self._binding_conditions = binding_conditions
    @property
    def binding_failure_conditions(self):
        """Gets the binding_failure_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        BindingFailureConditions contains a copy of the BindingFailureConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.  # noqa: E501

        :return: The binding_failure_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: list[str]
        """
        return self._binding_failure_conditions

    @binding_failure_conditions.setter
    def binding_failure_conditions(self, binding_failure_conditions):
        """Sets the binding_failure_conditions of this V1beta2DeviceRequestAllocationResult.

        BindingFailureConditions contains a copy of the BindingFailureConditions from the corresponding ResourceSlice at the time of allocation. This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.  # noqa: E501

        :param binding_failure_conditions: The binding_failure_conditions of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: list[str]
        """
        self._binding_failure_conditions = binding_failure_conditions
    @property
    def consumed_capacity(self):
        """Gets the consumed_capacity of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request. The consumed amount may differ from the requested amount: it is rounded up to the nearest valid value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount). The total consumed capacity for each device must not exceed the DeviceCapacity's Value. This field is populated only for devices that allow multiple allocations. All capacity entries are included, even if the consumed amount is zero.  # noqa: E501

        :return: The consumed_capacity of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._consumed_capacity

    @consumed_capacity.setter
    def consumed_capacity(self, consumed_capacity):
        """Sets the consumed_capacity of this V1beta2DeviceRequestAllocationResult.

        ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request. The consumed amount may differ from the requested amount: it is rounded up to the nearest valid value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount). The total consumed capacity for each device must not exceed the DeviceCapacity's Value. This field is populated only for devices that allow multiple allocations. All capacity entries are included, even if the consumed amount is zero.  # noqa: E501

        :param consumed_capacity: The consumed_capacity of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: dict(str, str)
        """
        self._consumed_capacity = consumed_capacity
@property
def device(self):
"""Gets the device of this V1beta2DeviceRequestAllocationResult. # noqa: E501
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:return: The device of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this V1beta2DeviceRequestAllocationResult.
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:param device: The device of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and device is None: # noqa: E501
raise ValueError("Invalid value for `device`, must not be `None`") # noqa: E501
self._device = device
@property
def driver(self):
"""Gets the driver of this V1beta2DeviceRequestAllocationResult. # noqa: E501
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:return: The driver of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1beta2DeviceRequestAllocationResult.
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:param driver: The driver of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def pool(self):
"""Gets the pool of this V1beta2DeviceRequestAllocationResult. # noqa: E501
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:return: The pool of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1beta2DeviceRequestAllocationResult.
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:param pool: The pool of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501
raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501
self._pool = pool
@property
def request(self):
"""Gets the request of this V1beta2DeviceRequestAllocationResult. # noqa: E501
Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format <main request>/<subrequest>. Multiple devices may have been allocated per request. # noqa: E501
:return: The request of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:rtype: str
"""
return self._request
@request.setter
def request(self, request):
"""Sets the request of this V1beta2DeviceRequestAllocationResult.
Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format <main request>/<subrequest>. Multiple devices may have been allocated per request. # noqa: E501
:param request: The request of this V1beta2DeviceRequestAllocationResult. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and request is None: # noqa: E501
raise ValueError("Invalid value for `request`, must not be `None`") # noqa: E501
self._request = request
    @property
    def share_id(self):
        """Gets the share_id of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        ShareID uniquely identifies an individual allocation share of the device, used when the device supports multiple simultaneous allocations. It serves as an additional map key to differentiate concurrent shares of the same device.  # noqa: E501

        :return: The share_id of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: str
        """
        return self._share_id

    @share_id.setter
    def share_id(self, share_id):
        """Sets the share_id of this V1beta2DeviceRequestAllocationResult.

        ShareID uniquely identifies an individual allocation share of the device, used when the device supports multiple simultaneous allocations. It serves as an additional map key to differentiate concurrent shares of the same device.  # noqa: E501

        :param share_id: The share_id of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: str
        """
        self._share_id = share_id
    @property
    def tolerations(self):
        """Gets the tolerations of this V1beta2DeviceRequestAllocationResult.  # noqa: E501

        A copy of all tolerations specified in the request at the time when the device got allocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate.  # noqa: E501

        :return: The tolerations of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :rtype: list[V1beta2DeviceToleration]
        """
        return self._tolerations

    @tolerations.setter
    def tolerations(self, tolerations):
        """Sets the tolerations of this V1beta2DeviceRequestAllocationResult.

        A copy of all tolerations specified in the request at the time when the device got allocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate.  # noqa: E501

        :param tolerations: The tolerations of this V1beta2DeviceRequestAllocationResult.  # noqa: E501
        :type: list[V1beta2DeviceToleration]
        """
        self._tolerations = tolerations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceRequestAllocationResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceRequestAllocationResult):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceRequestAllocationResult |
python | ApeWorX__ape | src/ape_ethereum/provider.py | {
"start": 68024,
"end": 68815
} | class ____(ManagerAccessMixin):
def __init__(self, eth_call_args: list):
self._arguments = eth_call_args
self.contract_type = None
@cached_property
def trace(self) -> CallTrace:
return CallTrace(
tx=self._arguments[0], arguments=self._arguments[1:], use_tokens_for_symbols=True
)
@cached_property
def source_traceback(self) -> Optional[SourceTraceback]:
ct = self.contract_type
if ct is None:
return None
method_id = self._arguments[0].get("data", "")[:10] or None
if ct and method_id:
if contract_src := self.local_project._create_contract_source(ct):
return SourceTraceback.create(contract_src, self.trace, method_id)
return None
| _LazyCallTrace |
python | jazzband__django-oauth-toolkit | tests/test_authorization_code.py | {
"start": 2126,
"end": 2995
} | class ____(BaseTest):
"""
Test to avoid regression for the issue 315: request object
was being reassigned when getting AuthorizationView
"""
def test_request_is_not_overwritten(self):
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
response = self.client.get(
reverse("oauth2_provider:authorize"),
{
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
},
)
self.assertEqual(response.status_code, 200)
assert "request" not in response.context_data
@pytest.mark.oauth2_settings(presets.DEFAULT_SCOPES_RW)
| TestRegressionIssue315 |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_cli_integration.py | {
"start": 10538,
"end": 12384
} | class ____:
# `ray job status` should exit with 0 if the job exists and non-zero if it doesn't.
# This is the contract between Ray and KubRay v1.3.0.
def test_status_job_exists(self, ray_start_stop):
cmd = "echo hello"
job_id = "test_job_id"
_run_cmd(
f"ray job submit --submission-id={job_id} -- bash -c '{cmd}'",
should_fail=False,
)
_run_cmd(f"ray job status {job_id}", should_fail=False)
def test_status_job_does_not_exist(self, ray_start_stop):
job_id = "test_job_id"
_run_cmd(f"ray job status {job_id}", should_fail=True)
def test_quote_escaping(ray_start_stop):
cmd = "echo \"hello 'world'\""
job_id = "test_quote_escaping"
stdout, _ = _run_cmd(
f"ray job submit --job-id={job_id} -- {cmd}",
)
assert "hello 'world'" in stdout
def test_resources(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1}, _memory=4)
# Check the case of too many resources.
for id, arg in [
("entrypoint_num_cpus", "--entrypoint-num-cpus=2"),
("entrypoint_num_gpus", "--entrypoint-num-gpus=2"),
("entrypoint_memory", "--entrypoint-memory=5"),
("entrypoint_resources", "--entrypoint-resources='{\"Custom\": 2}'"),
]:
_run_cmd(f"ray job submit --submission-id={id} --no-wait {arg} -- echo hi")
stdout, _ = _run_cmd(f"ray job status {id}")
assert "waiting for resources" in stdout
# Check the case of sufficient resources.
stdout, _ = _run_cmd(
"ray job submit --entrypoint-num-cpus=1 "
"--entrypoint-num-gpus=1 --entrypoint-memory=4 --entrypoint-resources='{"
'"Custom": 1}\' -- echo hello',
)
assert "hello" in stdout
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestJobStatus |
python | pytest-dev__pytest-mock | src/pytest_mock/plugin.py | {
"start": 1972,
"end": 25297
} | class ____:
"""
Fixture that provides the same interface to functions in the mock module,
ensuring that they are uninstalled at the end of each test.
"""
def __init__(self, config: Any) -> None:
self._mock_cache: MockCache = MockCache()
self.mock_module = mock_module = get_mock_module(config)
self.patch = self._Patcher(self._mock_cache, mock_module) # type: MockerFixture._Patcher
# aliases for convenience
self.Mock = mock_module.Mock
self.MagicMock = mock_module.MagicMock
self.NonCallableMock = mock_module.NonCallableMock
self.NonCallableMagicMock = mock_module.NonCallableMagicMock
self.PropertyMock = mock_module.PropertyMock
if hasattr(mock_module, "AsyncMock"):
self.AsyncMock = mock_module.AsyncMock
self.call = mock_module.call
self.ANY = mock_module.ANY
self.DEFAULT = mock_module.DEFAULT
self.sentinel = mock_module.sentinel
self.mock_open = mock_module.mock_open
if hasattr(mock_module, "seal"):
self.seal = mock_module.seal
def create_autospec(
self, spec: Any, spec_set: bool = False, instance: bool = False, **kwargs: Any
) -> MockType:
m: MockType = self.mock_module.create_autospec(
spec, spec_set, instance, **kwargs
)
self._mock_cache.add(m)
return m
def resetall(
self, *, return_value: bool = False, side_effect: bool = False
) -> None:
"""
Call reset_mock() on all patchers started by this fixture.
:param bool return_value: Reset the return_value of mocks.
:param bool side_effect: Reset the side_effect of mocks.
"""
supports_reset_mock_with_args: tuple[type[Any], ...]
if hasattr(self, "AsyncMock"):
supports_reset_mock_with_args = (self.Mock, self.AsyncMock)
else:
supports_reset_mock_with_args = (self.Mock,)
for mock_item in self._mock_cache:
# See issue #237.
if not hasattr(mock_item.mock, "reset_mock"):
continue
# NOTE: The mock may be a dictionary
if hasattr(mock_item.mock, "spy_return_list"):
mock_item.mock.spy_return_list = []
if hasattr(mock_item.mock, "spy_return_iter"):
mock_item.mock.spy_return_iter = None
if isinstance(mock_item.mock, supports_reset_mock_with_args):
mock_item.mock.reset_mock(
return_value=return_value, side_effect=side_effect
)
else:
mock_item.mock.reset_mock()
def stopall(self) -> None:
"""
Stop all patchers started by this fixture. Can be safely called multiple
times.
"""
self._mock_cache.clear()
def stop(self, mock: unittest.mock.MagicMock) -> None:
"""
Stops a previous patch or spy call by passing the ``MagicMock`` object
returned by it.
"""
self._mock_cache.remove(mock)
def spy(
self, obj: object, name: str, duplicate_iterators: bool = False
) -> MockType:
"""
Create a spy of method. It will run method normally, but it is now
possible to use `mock` call features with it, like call count.
:param obj: An object.
:param name: A method in object.
:param duplicate_iterators: Whether to keep a copy of the returned iterator in `spy_return_iter`.
:return: Spy object.
"""
method = getattr(obj, name)
def wrapper(*args, **kwargs):
spy_obj.spy_return = None
spy_obj.spy_exception = None
try:
r = method(*args, **kwargs)
except BaseException as e:
spy_obj.spy_exception = e
raise
else:
if duplicate_iterators and isinstance(r, Iterator):
r, duplicated_iterator = itertools.tee(r, 2)
spy_obj.spy_return_iter = duplicated_iterator
else:
spy_obj.spy_return_iter = None
spy_obj.spy_return = r
spy_obj.spy_return_list.append(r)
return r
async def async_wrapper(*args, **kwargs):
spy_obj.spy_return = None
spy_obj.spy_exception = None
try:
r = await method(*args, **kwargs)
except BaseException as e:
spy_obj.spy_exception = e
raise
else:
spy_obj.spy_return = r
spy_obj.spy_return_list.append(r)
return r
if inspect.iscoroutinefunction(method):
wrapped = functools.update_wrapper(async_wrapper, method)
else:
wrapped = functools.update_wrapper(wrapper, method)
autospec = inspect.ismethod(method) or inspect.isfunction(method)
spy_obj = self.patch.object(obj, name, side_effect=wrapped, autospec=autospec)
spy_obj.spy_return = None
spy_obj.spy_return_iter = None
spy_obj.spy_return_list = []
spy_obj.spy_exception = None
return spy_obj
def stub(self, name: Optional[str] = None) -> unittest.mock.MagicMock:
"""
Create a stub method. It accepts any arguments. Ideal to register to
callbacks in tests.
:param name: the constructed stub's name as used in repr
:return: Stub object.
"""
return cast(
unittest.mock.MagicMock,
self.mock_module.MagicMock(spec=lambda *args, **kwargs: None, name=name),
)
def async_stub(self, name: Optional[str] = None) -> AsyncMockType:
"""
Create a async stub method. It accepts any arguments. Ideal to register to
callbacks in tests.
:param name: the constructed stub's name as used in repr
:return: Stub object.
"""
return cast(
AsyncMockType,
self.mock_module.AsyncMock(spec=lambda *args, **kwargs: None, name=name),
)
class _Patcher:
"""
Object to provide the same interface as mock.patch, mock.patch.object,
etc. We need this indirection to keep the same API of the mock package.
"""
DEFAULT = object()
def __init__(self, mock_cache, mock_module):
self.__mock_cache = mock_cache
self.mock_module = mock_module
def _start_patch(
self, mock_func: Any, warn_on_mock_enter: bool, *args: Any, **kwargs: Any
) -> MockType:
"""Patches something by calling the given function from the mock
module, registering the patch to stop it later and returns the
mock object resulting from the mock call.
"""
p = mock_func(*args, **kwargs)
mocked: MockType = p.start()
self.__mock_cache.add(mock=mocked, patch=p)
if hasattr(mocked, "reset_mock"):
# check if `mocked` is actually a mock object, as depending on autospec or target
# parameters `mocked` can be anything
if hasattr(mocked, "__enter__") and warn_on_mock_enter:
mocked.__enter__.side_effect = lambda: warnings.warn(
"Mocks returned by pytest-mock do not need to be used as context managers. "
"The mocker fixture automatically undoes mocking at the end of a test. "
"This warning can be ignored if it was triggered by mocking a context manager. "
"https://pytest-mock.readthedocs.io/en/latest/usage.html#usage-as-context-manager",
PytestMockWarning,
stacklevel=5,
)
return mocked
def object(
self,
target: object,
attribute: str,
new: object = DEFAULT,
spec: Optional[object] = None,
create: bool = False,
spec_set: Optional[object] = None,
autospec: Optional[object] = None,
new_callable: object = None,
**kwargs: Any,
) -> MockType:
"""API to mock.patch.object"""
if new is self.DEFAULT:
new = self.mock_module.DEFAULT
return self._start_patch(
self.mock_module.patch.object,
True,
target,
attribute,
new=new,
spec=spec,
create=create,
spec_set=spec_set,
autospec=autospec,
new_callable=new_callable,
**kwargs,
)
def context_manager(
self,
target: builtins.object,
attribute: str,
new: builtins.object = DEFAULT,
spec: Optional[builtins.object] = None,
create: bool = False,
spec_set: Optional[builtins.object] = None,
autospec: Optional[builtins.object] = None,
new_callable: builtins.object = None,
**kwargs: Any,
) -> MockType:
"""This is equivalent to mock.patch.object except that the returned mock
does not issue a warning when used as a context manager."""
if new is self.DEFAULT:
new = self.mock_module.DEFAULT
return self._start_patch(
self.mock_module.patch.object,
False,
target,
attribute,
new=new,
spec=spec,
create=create,
spec_set=spec_set,
autospec=autospec,
new_callable=new_callable,
**kwargs,
)
def multiple(
self,
target: builtins.object,
spec: Optional[builtins.object] = None,
create: bool = False,
spec_set: Optional[builtins.object] = None,
autospec: Optional[builtins.object] = None,
new_callable: Optional[builtins.object] = None,
**kwargs: Any,
) -> dict[str, MockType]:
"""API to mock.patch.multiple"""
return self._start_patch(
self.mock_module.patch.multiple,
True,
target,
spec=spec,
create=create,
spec_set=spec_set,
autospec=autospec,
new_callable=new_callable,
**kwargs,
)
def dict(
self,
in_dict: Union[Mapping[Any, Any], str],
values: Union[Mapping[Any, Any], Iterable[tuple[Any, Any]]] = (),
clear: bool = False,
**kwargs: Any,
) -> Any:
"""API to mock.patch.dict"""
return self._start_patch(
self.mock_module.patch.dict,
True,
in_dict,
values=values,
clear=clear,
**kwargs,
)
@overload
def __call__(
self,
target: str,
new: None = ...,
spec: Optional[builtins.object] = ...,
create: bool = ...,
spec_set: Optional[builtins.object] = ...,
autospec: Optional[builtins.object] = ...,
new_callable: None = ...,
**kwargs: Any,
) -> MockType: ...
@overload
def __call__(
self,
target: str,
new: _T,
spec: Optional[builtins.object] = ...,
create: bool = ...,
spec_set: Optional[builtins.object] = ...,
autospec: Optional[builtins.object] = ...,
new_callable: None = ...,
**kwargs: Any,
) -> _T: ...
@overload
def __call__(
self,
target: str,
new: None,
spec: Optional[builtins.object],
create: bool,
spec_set: Optional[builtins.object],
autospec: Optional[builtins.object],
new_callable: Callable[[], _T],
**kwargs: Any,
) -> _T: ...
@overload
def __call__(
self,
target: str,
new: None = ...,
spec: Optional[builtins.object] = ...,
create: bool = ...,
spec_set: Optional[builtins.object] = ...,
autospec: Optional[builtins.object] = ...,
*,
new_callable: Callable[[], _T],
**kwargs: Any,
) -> _T: ...
def __call__(
self,
target: str,
new: builtins.object = DEFAULT,
spec: Optional[builtins.object] = None,
create: bool = False,
spec_set: Optional[builtins.object] = None,
autospec: Optional[builtins.object] = None,
new_callable: Optional[Callable[[], Any]] = None,
**kwargs: Any,
) -> Any:
"""API to mock.patch"""
if new is self.DEFAULT:
new = self.mock_module.DEFAULT
return self._start_patch(
self.mock_module.patch,
True,
target,
new=new,
spec=spec,
create=create,
spec_set=spec_set,
autospec=autospec,
new_callable=new_callable,
**kwargs,
)
def _mocker(pytestconfig: Any) -> Generator[MockerFixture, None, None]:
"""
Return an object that has the same interface to the `mock` module, but
takes care of automatically undoing all patches after each test method.
"""
result = MockerFixture(pytestconfig)
yield result
result.stopall()
mocker = pytest.fixture()(_mocker) # default scope is function
class_mocker = pytest.fixture(scope="class")(_mocker)
module_mocker = pytest.fixture(scope="module")(_mocker)
package_mocker = pytest.fixture(scope="package")(_mocker)
session_mocker = pytest.fixture(scope="session")(_mocker)
_mock_module_patches: list[Any] = []
_mock_module_originals: dict[str, Any] = {}
def assert_wrapper(
__wrapped_mock_method__: Callable[..., Any], *args: Any, **kwargs: Any
) -> None:
__tracebackhide__ = True
try:
__wrapped_mock_method__(*args, **kwargs)
return
except AssertionError as e:
if getattr(e, "_mock_introspection_applied", 0):
msg = str(e)
else:
__mock_self = args[0]
msg = str(e)
if __mock_self.call_args is not None:
actual_args, actual_kwargs = __mock_self.call_args
introspection = ""
try:
assert actual_args == args[1:]
except AssertionError as e_args:
introspection += "\nArgs:\n" + str(e_args)
try:
assert actual_kwargs == kwargs
except AssertionError as e_kwargs:
introspection += "\nKwargs:\n" + str(e_kwargs)
if introspection:
msg += "\n\npytest introspection follows:\n" + introspection
e = AssertionError(msg)
e._mock_introspection_applied = True # type:ignore[attr-defined]
raise e
def assert_has_calls_wrapper(
__wrapped_mock_method__: Callable[..., Any], *args: Any, **kwargs: Any
) -> None:
__tracebackhide__ = True
try:
__wrapped_mock_method__(*args, **kwargs)
return
except AssertionError as e:
any_order = kwargs.get("any_order", False)
if getattr(e, "_mock_introspection_applied", 0) or any_order:
msg = str(e)
else:
__mock_self = args[0]
msg = str(e)
if __mock_self.call_args_list is not None:
actual_calls = list(__mock_self.call_args_list)
expect_calls = args[1]
introspection = ""
from itertools import zip_longest
for actual_call, expect_call in zip_longest(actual_calls, expect_calls):
if actual_call is not None:
actual_args, actual_kwargs = actual_call
else:
actual_args = tuple()
actual_kwargs = {}
if expect_call is not None:
_, expect_args, expect_kwargs = expect_call
else:
expect_args = tuple()
expect_kwargs = {}
try:
assert actual_args == expect_args
except AssertionError as e_args:
introspection += "\nArgs:\n" + str(e_args)
try:
assert actual_kwargs == expect_kwargs
except AssertionError as e_kwargs:
introspection += "\nKwargs:\n" + str(e_kwargs)
if introspection:
msg += "\n\npytest introspection follows:\n" + introspection
e = AssertionError(msg)
e._mock_introspection_applied = True # type:ignore[attr-defined]
raise e
def wrap_assert_not_called(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_not_called"], *args, **kwargs)
def wrap_assert_called_with(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_called_with"], *args, **kwargs)
def wrap_assert_called_once(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_called_once"], *args, **kwargs)
def wrap_assert_called_once_with(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_called_once_with"], *args, **kwargs)
def wrap_assert_has_calls(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_has_calls_wrapper(
_mock_module_originals["assert_has_calls"], *args, **kwargs
)
def wrap_assert_any_call(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_any_call"], *args, **kwargs)
def wrap_assert_called(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_called"], *args, **kwargs)
def wrap_assert_not_awaited(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_not_awaited"], *args, **kwargs)
def wrap_assert_awaited_with(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_awaited_with"], *args, **kwargs)
def wrap_assert_awaited_once(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_awaited_once"], *args, **kwargs)
def wrap_assert_awaited_once_with(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_awaited_once_with"], *args, **kwargs)
def wrap_assert_has_awaits(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_has_awaits"], *args, **kwargs)
def wrap_assert_any_await(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_any_await"], *args, **kwargs)
def wrap_assert_awaited(*args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
assert_wrapper(_mock_module_originals["assert_awaited"], *args, **kwargs)
def wrap_assert_methods(config: Any) -> None:
"""
Wrap assert methods of mock module so we can hide their traceback and
add introspection information to specified argument asserts.
"""
# Make sure we only do this once
if _mock_module_originals:
return
mock_module = get_mock_module(config)
wrappers = {
"assert_called": wrap_assert_called,
"assert_called_once": wrap_assert_called_once,
"assert_called_with": wrap_assert_called_with,
"assert_called_once_with": wrap_assert_called_once_with,
"assert_any_call": wrap_assert_any_call,
"assert_has_calls": wrap_assert_has_calls,
"assert_not_called": wrap_assert_not_called,
}
for method, wrapper in wrappers.items():
try:
original = getattr(mock_module.NonCallableMock, method)
except AttributeError: # pragma: no cover
continue
_mock_module_originals[method] = original
patcher = mock_module.patch.object(mock_module.NonCallableMock, method, wrapper)
patcher.start()
_mock_module_patches.append(patcher)
if hasattr(mock_module, "AsyncMock"):
async_wrappers = {
"assert_awaited": wrap_assert_awaited,
"assert_awaited_once": wrap_assert_awaited_once,
"assert_awaited_with": wrap_assert_awaited_with,
"assert_awaited_once_with": wrap_assert_awaited_once_with,
"assert_any_await": wrap_assert_any_await,
"assert_has_awaits": wrap_assert_has_awaits,
"assert_not_awaited": wrap_assert_not_awaited,
}
for method, wrapper in async_wrappers.items():
try:
original = getattr(mock_module.AsyncMock, method)
except AttributeError: # pragma: no cover
continue
_mock_module_originals[method] = original
patcher = mock_module.patch.object(mock_module.AsyncMock, method, wrapper)
patcher.start()
_mock_module_patches.append(patcher)
config.add_cleanup(unwrap_assert_methods)
def unwrap_assert_methods() -> None:
    """Stop every patcher installed by ``wrap_assert_methods`` and reset
    the module-level bookkeeping containers."""
    for patcher in _mock_module_patches:
        try:
            patcher.stop()
        except RuntimeError as exc:
            # a patcher might have been stopped by user code (#137)
            # so we need to catch this error here and ignore it;
            # unfortunately there's no public API to check if a patch
            # has been started, so catching the error it is
            if str(exc) != "stop called on unstarted patcher":
                raise
    _mock_module_patches[:] = []
    _mock_module_originals.clear()
def pytest_addoption(parser: Any) -> None:
    """Register this plugin's ini options with pytest."""
    # (name, help text, default) for each ini option we expose.
    ini_options = (
        (
            "mock_traceback_monkeypatch",
            "Monkeypatch the mock library to improve reporting of the "
            "assert_called_... methods",
            True,
        ),
        (
            "mock_use_standalone_module",
            'Use standalone "mock" (from PyPI) instead of builtin "unittest.mock" '
            "on Python 3",
            False,
        ),
    )
    for name, help_text, default in ini_options:
        parser.addini(name, help_text, default=default)
def pytest_configure(config: Any) -> None:
    """Install the mock-assert monkeypatching unless the ini option
    disables it or native (``--tb=native``) tracebacks were requested."""
    traceback_style = config.getoption("--tb", default="auto")
    monkeypatch_enabled = parse_ini_boolean(
        config.getini("mock_traceback_monkeypatch")
    )
    if monkeypatch_enabled and traceback_style != "native":
        wrap_assert_methods(config)
| MockerFixture |
python | facebook__pyre-check | client/identifiers.py | {
"start": 747,
"end": 2299
} | class ____(enum.Enum):
"""
The pyre flavor acts as a name of a particular language-server + daemon
pair. Its value is a factor in determining socket and log paths, which
have to be kept separate if we are running multiple language servers
in parallel, as well as tagging telemetry data.
On the client side, the enum value is only really important for language
servers, which are long-lived and in most cases need custom daemons.
All standard pyre commands use the CLASSIC daemon.
The flavor is restricted to be one of a few known options because we rely
on the values for metrics and also because we depend on the names being
short enough not to exceed socket path limits.
"""
CLASSIC = "classic"
SHADOW = "shadow"
def path_suffix(self) -> str:
return "" if self == PyreFlavor.CLASSIC else f"__{self.value}"
@staticmethod
def persistent_choices() -> List[str]:
"""
Valid flavors to use for the `pyre persistent` command.
"""
return [
PyreFlavor.CLASSIC.value,
PyreFlavor.SHADOW.value,
]
@staticmethod
def server_flavor_choices() -> List[str]:
return [
PyreFlavor.CLASSIC.value,
]
def server_log_subdirectory(self) -> str:
return self.value
def simple_name(self) -> str:
if self == PyreFlavor.CLASSIC:
return "Type Checking"
else:
raise IllegalFlavorException(f"No simple name defined for flavor {self}")
| PyreFlavor |
python | getsentry__sentry | src/sentry/integrations/github/blame.py | {
"start": 603,
"end": 730
} | class ____(TypedDict):
oid: str
author: GitHubAuthor | None
message: str
committedDate: str
| GitHubFileBlameCommit |
python | encode__httpx | httpx/_exceptions.py | {
"start": 5573,
"end": 5751
} | class ____(Exception):
"""
URL is improperly formed or cannot be parsed.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
| InvalidURL |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 14131,
"end": 14333
} | class ____(Serializer):
def serialize(
self, obj: CheckinProcessingError, attrs, user, **kwargs
) -> CheckinProcessingErrorData:
return obj.to_dict()
| CheckinProcessingErrorSerializer |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modular_qwen3_moe.py | {
"start": 1895,
"end": 1946
} | class ____(Qwen2MoeExperts):
pass
| Qwen3MoeExperts |
python | walkccc__LeetCode | solutions/2809. Minimum Time to Make Array Sum At Most x/2809.py | {
"start": 0,
"end": 898
} | class ____:
def minimumTime(self, nums1: list[int], nums2: list[int], x: int) -> int:
n = len(nums1)
# dp[i][j] := the maximum reduced value if we do j operations on the first
# i numbers
dp = [[0] * (n + 1) for _ in range(n + 1)]
sum1 = sum(nums1)
sum2 = sum(nums2)
for i, (num2, num1) in enumerate(sorted(zip(nums2, nums1)), 1):
for j in range(1, i + 1):
dp[i][j] = max(
# the maximum reduced value if we do j operations on the first
# i - 1 numbers
dp[i - 1][j],
# the maximum reduced value if we do j - 1 operations on the first
# i - 1 numbers + making the i-th number of `nums1` to 0 at the
# j-th operation
dp[i - 1][j - 1] + num2 * j + num1
)
for op in range(n + 1):
if sum1 + sum2 * op - dp[n][op] <= x:
return op
return -1
| Solution |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 4011,
"end": 4099
} | class ____(models.Model):
username = models.CharField(max_length=30, unique=True)
| User |
python | django-extensions__django-extensions | django_extensions/auth/mixins.py | {
"start": 61,
"end": 464
} | class ____(UserPassesTestMixin):
model_permission_user_field = "user"
def get_model_permission_user_field(self):
return self.model_permission_user_field
def test_func(self):
model_attr = self.get_model_permission_user_field()
current_user = self.request.user
return current_user == getattr(self.get_queryset().first(), model_attr)
| ModelUserFieldPermissionMixin |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 130245,
"end": 132555
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('fan', c_uint),
('speed', c_uint),
]
nvmlFanSpeedInfo_v1 = 0x100000C
def nvmlDeviceGetFanSpeedRPM(handle):
c_fanSpeed = c_nvmlFanSpeedInfo_t()
c_fanSpeed.fan = 0
c_fanSpeed.version = nvmlFanSpeedInfo_v1
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeedRPM")
ret = fn(handle, byref(c_fanSpeed))
_nvmlCheckReturn(ret)
return c_fanSpeed.speed
def nvmlDeviceGetTargetFanSpeed(handle, fan):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTargetFanSpeed")
ret = fn(handle, fan, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetNumFans(device):
c_numFans = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumFans")
ret = fn(device, byref(c_numFans))
_nvmlCheckReturn(ret)
return c_numFans.value
def nvmlDeviceSetDefaultFanSpeed_v2(handle, index):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultFanSpeed_v2");
ret = fn(handle, index)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceGetMinMaxFanSpeed(handle, minSpeed=c_uint(), maxSpeed=c_uint()):
isReference = (type(minSpeed) is not c_uint) or (type(maxSpeed) is not c_uint)
minSpeedRef = minSpeed if isReference else byref(minSpeed)
maxSpeedRef = maxSpeed if isReference else byref(maxSpeed)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinMaxFanSpeed")
ret = fn(handle, minSpeedRef, maxSpeedRef)
_nvmlCheckReturn(ret)
return NVML_SUCCESS if isReference else [minSpeed.value, maxSpeed.value]
def nvmlDeviceGetFanControlPolicy_v2(handle, fan, fanControlPolicy=c_uint()):
isReference = type(fanControlPolicy) is not c_uint
fanControlPolicyRef = fanControlPolicy if isReference else byref(fanControlPolicy)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanControlPolicy_v2")
ret = fn(handle, fan, fanControlPolicyRef)
_nvmlCheckReturn(ret)
return NVML_SUCCESS if isReference else fanControlPolicy.value
def nvmlDeviceSetFanControlPolicy(handle, fan, fanControlPolicy):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetFanControlPolicy")
ret = fn(handle, fan, _nvmlFanControlPolicy_t(fanControlPolicy))
_nvmlCheckReturn(ret)
return NVML_SUCCESS
| c_nvmlFanSpeedInfo_t |
python | lxml__lxml | benchmark/bench_etree.py | {
"start": 443,
"end": 12547
} | class ____(benchbase.TreeBenchMark):
@anytree
@nochange
def bench_iter_children(self, root):
for child in root:
pass
@anytree
@nochange
def bench_iter_children_reversed(self, root):
for child in reversed(root):
pass
@anytree
@nochange
def bench_first_child(self, root):
for i in self.repeat1000:
child = root[0]
@anytree
@nochange
def bench_last_child(self, root):
for i in self.repeat1000:
child = root[-1]
@widetree
@nochange
def bench_middle_child(self, root):
pos = len(root) // 2
for i in self.repeat1000:
child = root[pos]
@nochange
@with_attributes(False)
@with_text(text=True)
def bench_tostring_text_ascii(self, root):
self.etree.tostring(root, method="text")
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_unicode(self, root):
self.etree.tostring(root, method="text", encoding='unicode')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
def bench_tostring_text_utf16(self, root):
self.etree.tostring(root, method="text", encoding='UTF-16')
@nochange
@with_attributes(False)
@with_text(text=True, utext=True)
@onlylib('lxe')
@children
def bench_tostring_text_utf8_with_tail(self, children):
for child in children:
self.etree.tostring(child, method="text",
encoding='UTF-8', with_tail=True)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8(self, root):
self.etree.tostring(root, encoding='UTF-8')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf16(self, root):
self.etree.tostring(root, encoding='UTF-16')
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_tostring_utf8_unicode_XML(self, root):
xml = self.etree.tostring(root, encoding='UTF-8').decode('UTF-8')
self.etree.XML(xml)
@nochange
@with_attributes(True, False)
@with_text(text=True, utext=True)
def bench_write_utf8_parse_bytesIO(self, root):
f = BytesIO()
self.etree.ElementTree(root).write(f, encoding='UTF-8')
f.seek(0)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_parse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
self.etree.parse(f)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_XML(self, root_xml):
self.etree.XML(root_xml)
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
pass
@with_attributes(True, False)
@with_text(text=True, utext=True)
@serialized
def bench_iterparse_bytesIO_clear(self, root_xml):
f = BytesIO(root_xml)
for event, element in self.etree.iterparse(f):
element.clear()
@anytree
def bench_append_from_document(self, root1, root2):
# == "1,2 2,3 1,3 3,1 3,2 2,1" # trees 1 and 2, or 2 and 3, or ...
for el in root2:
root1.append(el)
@anytree
def bench_insert_from_document(self, root1, root2):
pos = len(root1)//2
for el in root2:
root1.insert(pos, el)
pos = pos + 1
def bench_rotate_children(self, root):
# == "1 2 3" # runs on any single tree independently
for i in range(100):
el = root[0]
del root[0]
root.append(el)
@widetree
def bench_reorder(self, root):
for i in range(1,len(root)//2):
el = root[0]
del root[0]
root[-i:-i] = [ el ]
@widetree
def bench_reorder_slice(self, root):
for i in range(1,len(root)//2):
els = root[0:1]
del root[0]
root[-i:-i] = els
def bench_clear(self, root):
root.clear()
@widetree
@nochange
@children
def bench_len(self, children):
for child in children:
map(len, repeat(child, 20))
@widetree
@children
def bench_create_subelements(self, children):
SubElement = self.etree.SubElement
for child in children:
SubElement(child, '{test}test')
@widetree
@children
def bench_append_elements(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child.append(el)
@widetree
@nochange
@children
def bench_makeelement(self, children):
empty_attrib = {}
for child in children:
child.makeelement('{test}test', empty_attrib)
@widetree
@nochange
@children
def bench_create_elements(self, children):
Element = self.etree.Element
for child in children:
Element('{test}test')
@widetree
@children
def bench_replace_children_element(self, children):
Element = self.etree.Element
for child in children:
el = Element('{test}test')
child[:] = [el]
@widetree
@children
def bench_replace_children(self, children):
els = [ self.etree.Element("newchild") ]
for child in children:
child[:] = els
@widetree
def bench_remove_children(self, root):
for child in root:
root.remove(child)
@widetree
def bench_remove_children_reversed(self, root):
for child in reversed(root):
root.remove(child)
@widetree
@children
def bench_set_attributes(self, children):
for child in children:
child.set('a', 'bla')
@widetree
@with_attributes(True)
@children
@nochange
def bench_get_attributes(self, children):
for child in children:
child.get('bla1')
child.get('{attr}test1')
@widetree
@children
def bench_setget_attributes(self, children):
for child in children:
child.set('a', 'bla')
for child in children:
child.get('a')
@widetree
@nochange
def bench_root_getchildren(self, root):
root.getchildren()
@widetree
@nochange
def bench_root_list_children(self, root):
list(root)
@widesubtree
@nochange
@children
def bench_getchildren(self, children):
for child in children:
child.getchildren()
@widesubtree
@nochange
@children
def bench_get_children_slice(self, children):
for child in children:
child[:]
@widesubtree
@nochange
@children
def bench_get_children_slice_2x(self, children):
for child in children:
child[:]
child[:]
@nochange
@children
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy(self, children):
for child in children:
copy.deepcopy(child)
@nochange
@with_attributes(True, False)
@with_text(utext=True, text=True, no_text=True)
def bench_deepcopy_all(self, root):
copy.deepcopy(root)
@widetree
@nochange
@children
def bench_tag(self, children):
for child in children:
child.tag
@widetree
@nochange
@children
def bench_tag_repeat(self, children):
for child in children:
for i in self.repeat100:
child.tag
@widetree
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text(self, children):
for child in children:
child.text
@widetree
@nochange
@with_text(utext=True, text=True, no_text=True)
@children
def bench_text_repeat(self, children):
for child in children:
for i in self.repeat500:
child.text
@widetree
@children
def bench_set_text(self, children):
text = TEXT
for child in children:
child.text = text
@widetree
@children
def bench_set_utext(self, children):
text = UTEXT
for child in children:
child.text = text
@widetree
@nochange
@onlylib('lxe')
def bench_index(self, root):
for child in root:
root.index(child)
@widetree
@nochange
@onlylib('lxe')
def bench_index_slice(self, root):
for child in root[5:100]:
root.index(child, 5, 100)
@widetree
@nochange
@onlylib('lxe')
def bench_index_slice_neg(self, root):
for child in root[-100:-5]:
root.index(child, start=-100, stop=-5)
@nochange
def bench_iter_all(self, root):
list(root.iter())
@nochange
def bench_iter_one_at_a_time(self, root):
list(islice(root.iter(), 2**30, None))
@nochange
def bench_iter_islice(self, root):
list(islice(root.iter(), 10, 110))
@nochange
def bench_iter_tag(self, root):
list(islice(root.iter(self.SEARCH_TAG), 3, 10))
@nochange
def bench_iter_tag_all(self, root):
list(root.iter(self.SEARCH_TAG))
@nochange
def bench_iter_tag_one_at_a_time(self, root):
list(islice(root.iter(self.SEARCH_TAG), 2**30, None))
@nochange
def bench_iter_tag_none(self, root):
list(root.iter("{ThisShould}NeverExist"))
@nochange
def bench_iter_tag_text(self, root):
[ e.text for e in root.iter(self.SEARCH_TAG) ]
@nochange
def bench_findall(self, root):
root.findall(".//*")
@nochange
def bench_findall_child(self, root):
root.findall(".//*/" + self.SEARCH_TAG)
@nochange
def bench_findall_tag(self, root):
root.findall(".//" + self.SEARCH_TAG)
@nochange
def bench_findall_path(self, root):
root.findall(".//*[%s]/./%s/./*" % (self.SEARCH_TAG, self.SEARCH_TAG))
@nochange
@onlylib('lxe')
def bench_xpath_path(self, root):
ns, tag = self.SEARCH_TAG[1:].split('}')
root.xpath(".//*[p:%s]/./p:%s/./*" % (tag,tag),
namespaces = {'p':ns})
@nochange
def bench_iterfind(self, root):
list(root.iterfind(".//*"))
@nochange
def bench_iterfind_tag(self, root):
list(root.iterfind(".//" + self.SEARCH_TAG))
@nochange
def bench_iterfind_islice(self, root):
list(islice(root.iterfind(".//*"), 10, 110))
_bench_xpath_single_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_single(self, root):
xpath = self._bench_xpath_single_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_single_xpath = self.etree.XPath(
'.//p:%s[1]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_find_single(self, root):
root.find(".//%s" % self.SEARCH_TAG)
@nochange
def bench_iter_single(self, root):
next(root.iter(self.SEARCH_TAG))
_bench_xpath_two_xpath = None
@nochange
@onlylib('lxe')
def bench_xpath_two(self, root):
xpath = self._bench_xpath_two_xpath
if xpath is None:
ns, tag = self.SEARCH_TAG[1:].split('}')
xpath = self._bench_xpath_two_xpath = self.etree.XPath(
'.//p:%s[position() < 3]' % tag, namespaces={'p': ns})
xpath(root)
@nochange
def bench_iterfind_two(self, root):
it = root.iterfind(".//%s" % self.SEARCH_TAG)
next(it)
next(it)
@nochange
def bench_iter_two(self, root):
it = root.iter(self.SEARCH_TAG)
next(it)
next(it)
if __name__ == '__main__':
benchbase.main(BenchMark)
| BenchMark |
python | numba__numba | numba/tests/test_sys_monitoring.py | {
"start": 34017,
"end": 34693
} | class ____(TestCase):
def test_skipping_of_tests_if_monitoring_in_use(self):
# check that the unit tests in the TestMonitoring class above will skip
# if there are other monitoring tools registered in the thread (in this
# case cProfile is used to cause that effect).
r = self.subprocess_test_runner(TestMonitoring.__module__,
'TestMonitoring',
'test_start_event',
flags={'-m': 'cProfile'})
self.assertIn("skipped=1", str(r))
@unittest.skipUnless(PYVERSION >= (3, 12), "needs Python 3.12+")
| TestMonitoringSelfTest |
python | PrefectHQ__prefect | src/prefect/server/orchestration/global_policy.py | {
"start": 11595,
"end": 12673
} | class ____(
BaseUniversalTransform[
orm_models.Run, Union[core.FlowRunPolicy, core.TaskRunPolicy]
]
):
"""
Records the scheduled time on a run.
When a run enters a scheduled state, `run.next_scheduled_start_time` is set to
the state's scheduled time. When leaving a scheduled state,
`run.next_scheduled_start_time` is unset.
"""
async def before_transition(
self, context: GenericOrchestrationContext[orm_models.Run, Any]
) -> None:
if self.nullified_transition():
return
# remove the next scheduled start time if exiting a scheduled state
if context.initial_state and context.initial_state.is_scheduled():
context.run.next_scheduled_start_time = None
# set next scheduled start time if entering a scheduled state
if context.proposed_state is not None and context.proposed_state.is_scheduled():
context.run.next_scheduled_start_time = (
context.proposed_state.state_details.scheduled_time
)
| SetNextScheduledStartTime |
python | walkccc__LeetCode | solutions/657. Robot Return to Origin/657.py | {
"start": 0,
"end": 150
} | class ____:
def judgeCircle(self, moves: str) -> bool:
return moves.count('R') == moves.count('L') and moves.count('U') == moves.count('D')
| Solution |
python | hynek__structlog | tests/test_threadlocal.py | {
"start": 8065,
"end": 11271
} | class ____:
def test_alias(self):
"""
We're keeping the old alias around.
"""
assert merge_threadlocal_context is merge_threadlocal
def test_bind_and_merge(self):
"""
Binding a variable causes it to be included in the result of
merge_threadlocal.
"""
with pytest.deprecated_call():
bind_threadlocal(a=1)
with pytest.deprecated_call():
assert {"a": 1, "b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_clear(self):
"""
The thread-local context can be cleared, causing any previously bound
variables to not be included in merge_threadlocal's result.
"""
with pytest.deprecated_call():
bind_threadlocal(a=1)
with pytest.deprecated_call():
clear_threadlocal()
with pytest.deprecated_call():
assert {"b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_merge_works_without_bind(self):
"""
merge_threadlocal returns values as normal even when there has
been no previous calls to bind_threadlocal.
"""
with pytest.deprecated_call():
assert {"b": 2} == merge_threadlocal(None, None, {"b": 2})
def test_multiple_binds(self):
"""
Multiple calls to bind_threadlocal accumulate values instead of
replacing them.
"""
with pytest.deprecated_call():
bind_threadlocal(a=1, b=2)
bind_threadlocal(c=3)
with pytest.deprecated_call():
assert {"a": 1, "b": 2, "c": 3} == merge_threadlocal(
None, None, {"b": 2}
)
def test_unbind_threadlocal(self):
"""
Test that unbinding from threadlocal works for keys that exist
and does not raise error when they do not exist.
"""
with pytest.deprecated_call():
bind_threadlocal(a=234, b=34)
with pytest.deprecated_call():
assert {"a": 234, "b": 34} == get_threadlocal()
with pytest.deprecated_call():
unbind_threadlocal("a")
with pytest.deprecated_call():
assert {"b": 34} == get_threadlocal()
with pytest.deprecated_call():
unbind_threadlocal("non-existing-key")
with pytest.deprecated_call():
assert {"b": 34} == get_threadlocal()
def test_get_context_no_context(self):
"""
If there is no context yet, _get_context will add it.
"""
# Don't rely on test order.
if hasattr(_CONTEXT, "context"):
del _CONTEXT.context
with pytest.raises(AttributeError):
_CONTEXT.context
with pytest.deprecated_call():
assert {} == get_threadlocal()
def test_get_merged(self):
"""
Returns a copy of the threadlocal context merged with the logger's
context.
"""
with pytest.deprecated_call():
bind_threadlocal(x=1)
log = structlog.get_logger().bind(y=2)
with pytest.deprecated_call():
assert {"x": 1, "y": 2} == get_merged_threadlocal(log)
| TestNewThreadLocal |
python | scikit-learn__scikit-learn | examples/gaussian_process/plot_gpr_on_structured_data.py | {
"start": 2156,
"end": 5869
} | class ____(GenericKernelMixin, Kernel):
"""
A minimal (but valid) convolutional kernel for sequences of variable
lengths."""
def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
self.baseline_similarity = baseline_similarity
self.baseline_similarity_bounds = baseline_similarity_bounds
@property
def hyperparameter_baseline_similarity(self):
return Hyperparameter(
"baseline_similarity", "numeric", self.baseline_similarity_bounds
)
def _f(self, s1, s2):
"""
kernel value between a pair of sequences
"""
return sum(
[1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
)
def _g(self, s1, s2):
"""
kernel derivative between a pair of sequences
"""
return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])
def __call__(self, X, Y=None, eval_gradient=False):
if Y is None:
Y = X
if eval_gradient:
return (
np.array([[self._f(x, y) for y in Y] for x in X]),
np.array([[[self._g(x, y)] for y in Y] for x in X]),
)
else:
return np.array([[self._f(x, y) for y in Y] for x in X])
def diag(self, X):
return np.array([self._f(x, x) for x in X])
def is_stationary(self):
return False
def clone_with_theta(self, theta):
cloned = clone(self)
cloned.theta = theta
return cloned
kernel = SequenceKernel()
# %%
# Sequence similarity matrix under the kernel
# ===========================================
import matplotlib.pyplot as plt
X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"])
K = kernel(X)
D = kernel.diag(X)
plt.figure(figsize=(8, 5))
plt.imshow(np.diag(D**-0.5).dot(K).dot(np.diag(D**-0.5)))
plt.xticks(np.arange(len(X)), X)
plt.yticks(np.arange(len(X)), X)
plt.title("Sequence similarity under the kernel")
plt.show()
# %%
# Regression
# ==========
X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"])
Y = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0])
training_idx = [0, 1, 3, 4]
gp = GaussianProcessRegressor(kernel=kernel)
gp.fit(X[training_idx], Y[training_idx])
plt.figure(figsize=(8, 5))
plt.bar(np.arange(len(X)), gp.predict(X), color="b", label="prediction")
plt.bar(training_idx, Y[training_idx], width=0.2, color="r", alpha=1, label="training")
plt.xticks(np.arange(len(X)), X)
plt.title("Regression on sequences")
plt.legend()
plt.show()
# %%
# Classification
# ==============
X_train = np.array(["AGCT", "CGA", "TAAC", "TCG", "CTTT", "TGCT"])
# whether there are 'A's in the sequence
Y_train = np.array([True, True, True, False, False, False])
gp = GaussianProcessClassifier(kernel)
gp.fit(X_train, Y_train)
X_test = ["AAA", "ATAG", "CTC", "CT", "C"]
Y_test = [True, True, False, False, False]
plt.figure(figsize=(8, 5))
plt.scatter(
np.arange(len(X_train)),
[1.0 if c else -1.0 for c in Y_train],
s=100,
marker="o",
edgecolor="none",
facecolor=(1, 0.75, 0),
label="training",
)
plt.scatter(
len(X_train) + np.arange(len(X_test)),
[1.0 if c else -1.0 for c in Y_test],
s=100,
marker="o",
edgecolor="none",
facecolor="r",
label="truth",
)
plt.scatter(
len(X_train) + np.arange(len(X_test)),
[1.0 if c else -1.0 for c in gp.predict(X_test)],
s=100,
marker="x",
facecolor="b",
linewidth=2,
label="prediction",
)
plt.xticks(np.arange(len(X_train) + len(X_test)), np.concatenate((X_train, X_test)))
plt.yticks([-1, 1], [False, True])
plt.title("Classification on sequences")
plt.legend()
plt.show()
| SequenceKernel |
python | eventlet__eventlet | eventlet/semaphore.py | {
"start": 6086,
"end": 7165
} | class ____(Semaphore):
"""A bounded semaphore checks to make sure its current value doesn't exceed
its initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity. If the
semaphore is released too many times it's a sign of a bug. If not given,
*value* defaults to 1.
"""
def __init__(self, value=1):
super().__init__(value)
self.original_counter = value
def release(self, blocking=True):
"""Release a semaphore, incrementing the internal counter by one. If
the counter would exceed the initial value, raises ValueError. When
it was zero on entry and another thread is waiting for it to become
larger than zero again, wake up that thread.
The *blocking* argument is for consistency with :class:`CappedSemaphore`
and is ignored
"""
if self.counter >= self.original_counter:
raise ValueError("Semaphore released too many times")
return super().release(blocking)
| BoundedSemaphore |
python | huggingface__transformers | tests/models/mistral/test_modeling_mistral.py | {
"start": 1461,
"end": 1594
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = MistralModel
@require_torch
| MistralModelTester |
python | scrapy__scrapy | tests/test_pipeline_crawl.py | {
"start": 1119,
"end": 1267
} | class ____(MediaDownloadSpider):
name = "brokenmedia"
def _process_url(self, url):
return url + ".foo"
| BrokenLinksMediaDownloadSpider |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_quotes/docstring_singles_class.py | {
"start": 0,
"end": 290
} | class ____():
''' Double quotes single line class docstring '''
''' Not a docstring '''
def foo(self, bar='''not a docstring'''):
''' Double quotes single line method docstring'''
pass
class Nested(foo()[:]): ''' inline docstring '''; pass
| SingleLineDocstrings |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 69432,
"end": 70129
} | class ____(BaseModel):
type: Literal["SubstreamPartitionRouter"]
parent_stream_configs: List[ParentStreamConfig] = Field(
...,
description="Specifies which parent streams are being iterated over and how parent records should be used to partition the child stream data set.",
title="Parent Stream Configs",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
CompositeErrorHandler.update_forward_refs()
DeclarativeSource.update_forward_refs()
SelectiveAuthenticator.update_forward_refs()
DeclarativeStream.update_forward_refs()
SessionTokenAuthenticator.update_forward_refs()
SimpleRetriever.update_forward_refs()
| SubstreamPartitionRouter |
python | jina-ai__jina | tests/unit/hubble-executor/exec.py | {
"start": 61,
"end": 246
} | class ____(Executor):
def __init__(self, bar, **kwargs):
super().__init__(**kwargs)
self.bar = bar
@requests
def foo(self, **kwargs):
hello()
| MyExecutor |
python | kamyu104__LeetCode-Solutions | Python/partition-list.py | {
"start": 233,
"end": 843
} | class ____(object):
# @param head, a ListNode
# @param x, an integer
# @return a ListNode
def partition(self, head, x):
dummySmaller, dummyGreater = ListNode(-1), ListNode(-1)
smaller, greater = dummySmaller, dummyGreater
while head:
if head.val < x:
smaller.next = head
smaller = smaller.next
else:
greater.next = head
greater = greater.next
head = head.next
smaller.next = dummyGreater.next
greater.next = None
return dummySmaller.next
| Solution |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 80907,
"end": 82500
} | class ____:
def test_no_description_configured(self):
class A(Block):
message: str
assert A.get_description() is None
def test_description_from_docstring(self, caplog):
class A(Block):
"""
A block, verily
Heading:
This extra stuff shouldn't show up in the description
"""
message: str
assert A.get_description() == "A block, verily"
assert len(caplog.records) == 0
def test_description_override(self):
class A(Block):
"""I won't show up in this block's description"""
_description = "But I will"
message: str
assert A.get_description() == "But I will"
def test_no_griffe_logs(self, caplog, capsys, recwarn):
"""
Ensures there are no extraneous output printed/warned.
"""
class A(Block):
"""
Without disable logger, this spawns griffe warnings.
Args:
string (str): This should spawn a warning
"""
A()
assert caplog.record_tuples == []
captured = capsys.readouterr()
assert captured.out == ""
assert captured.err == ""
assert len(recwarn) == 0
# to be extra sure that we are printing anything
# we shouldn't be
print("Sanity check!")
captured = capsys.readouterr()
assert captured.out == "Sanity check!\n"
warnings.warn("Sanity check two!")
assert len(recwarn) == 1
| TestGetDescription |
python | pandas-dev__pandas | pandas/plotting/_matplotlib/core.py | {
"start": 44697,
"end": 51464
} | class ____(PlanePlot):
@property
def _kind(self) -> Literal["scatter"]:
return "scatter"
def __init__(
self,
data,
x,
y,
s=None,
c=None,
*,
colorbar: bool | lib.NoDefault = lib.no_default,
norm=None,
**kwargs,
) -> None:
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
elif is_hashable(s) and s in data.columns:
s = data[s]
self.s = s
self.colorbar = colorbar
self.norm = norm
super().__init__(data, x, y, **kwargs)
if is_integer(c) and not holds_integer(self.data.columns):
c = self.data.columns[c]
self.c = c
@register_pandas_matplotlib_converters
def _make_plot(self, fig: Figure) -> None:
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
from pandas import Series
x_data = data[x]
s = Series(index=x_data)
if use_dynamic_x(ax, s.index):
s = maybe_convert_index(ax, s)
freq, s = prepare_ts_data(s, ax, self.kwds)
x_data = s.index
c_is_column = is_hashable(c) and c in self.data.columns
color_by_categorical = c_is_column and isinstance(
self.data[c].dtype, CategoricalDtype
)
color = self.color
c_values = self._get_c_values(color, color_by_categorical, c_is_column)
norm, cmap = self._get_norm_and_cmap(c_values, color_by_categorical)
cb = self._get_colorbar(c_values, c_is_column)
if self.legend:
label = self.label
else:
label = None
# if a list of non-color strings is passed in as c, color points
# by uniqueness of the strings, such same strings get same color
create_colors = not self._are_valid_colors(c_values)
if create_colors:
color_mapping = self._get_color_mapping(c_values)
c_values = [color_mapping[s] for s in c_values]
# build legend for labeling custom colors
ax.legend(
handles=[
mpl.patches.Circle((0, 0), facecolor=c, label=s)
for s, c in color_mapping.items()
]
)
scatter = ax.scatter(
x_data.values,
data[y].values,
c=c_values,
label=label,
cmap=cmap,
norm=norm,
s=self.s,
**self.kwds,
)
if cb:
cbar_label = c if c_is_column else ""
cbar = self._plot_colorbar(ax, fig=fig, label=cbar_label)
if color_by_categorical:
n_cats = len(self.data[c].cat.categories)
cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats))
cbar.ax.set_yticklabels(self.data[c].cat.categories)
if label is not None:
self._append_legend_handles_labels(
# error: Argument 2 to "_append_legend_handles_labels" of
# "MPLPlot" has incompatible type "Hashable"; expected "str"
scatter,
label, # type: ignore[arg-type]
)
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds["ecolor"] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds)
def _get_c_values(self, color, color_by_categorical: bool, c_is_column: bool):
c = self.c
if c is not None and color is not None:
raise TypeError("Specify exactly one of `c` and `color`")
if c is None and color is None:
c_values = mpl.rcParams["patch.facecolor"]
elif color is not None:
c_values = color
elif color_by_categorical:
c_values = self.data[c].cat.codes
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
return c_values
def _are_valid_colors(self, c_values: Series) -> bool:
# check if c_values contains strings and if these strings are valid mpl colors.
# no need to check numerics as these (and mpl colors) will be validated for us
# in .Axes.scatter._parse_scatter_color_args(...)
unique = np.unique(c_values)
try:
if len(c_values) and all(isinstance(c, str) for c in unique):
mpl.colors.to_rgba_array(unique)
return True
except (TypeError, ValueError) as _:
return False
def _get_color_mapping(self, c_values: Series) -> dict[str, np.ndarray]:
unique = np.unique(c_values)
n_colors = len(unique)
# passing `None` here will default to :rc:`image.cmap`
cmap = mpl.colormaps.get_cmap(self.colormap)
colors = cmap(np.linspace(0, 1, n_colors)) # RGB tuples
return dict(zip(unique, colors, strict=True))
def _get_norm_and_cmap(self, c_values, color_by_categorical: bool):
c = self.c
if self.colormap is not None:
cmap = mpl.colormaps.get_cmap(self.colormap)
# cmap is only used if c_values are integers, otherwise UserWarning.
# GH-53908: additionally call isinstance() because is_integer_dtype
# returns True for "b" (meaning "blue" and not int8 in this context)
elif not isinstance(c_values, str) and is_integer_dtype(c_values):
# pandas uses colormap, matplotlib uses cmap.
cmap = mpl.colormaps["Greys"]
else:
cmap = None
if color_by_categorical and cmap is not None:
n_cats = len(self.data[c].cat.categories)
cmap = mpl.colors.ListedColormap([cmap(i) for i in range(cmap.N)])
bounds = np.linspace(0, n_cats, n_cats + 1)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# TODO: warn that we are ignoring self.norm if user specified it?
# Doesn't happen in any tests 2023-11-09
else:
norm = self.norm
return norm, cmap
def _get_colorbar(self, c_values, c_is_column: bool) -> bool:
# plot colorbar if
# 1. colormap is assigned, and
# 2.`c` is a column containing only numeric values
plot_colorbar = self.colormap or c_is_column
cb = self.colorbar
if cb is lib.no_default:
return is_numeric_dtype(c_values) and plot_colorbar
return cb
| ScatterPlot |
python | redis__redis-py | tests/test_asyncio/test_monitor.py | {
"start": 165,
"end": 2671
} | class ____:
async def test_wait_command_not_found(self, r):
"""Make sure the wait_for_command func works when command is not found"""
async with r.monitor() as m:
response = await wait_for_command(r, m, "nothing")
assert response is None
async def test_response_values(self, r):
db = r.connection_pool.connection_kwargs.get("db", 0)
async with r.monitor() as m:
await r.ping()
response = await wait_for_command(r, m, "PING")
assert isinstance(response["time"], float)
assert response["db"] == db
assert response["client_type"] in ("tcp", "unix")
assert isinstance(response["client_address"], str)
assert isinstance(response["client_port"], str)
assert response["command"] == "PING"
async def test_command_with_quoted_key(self, r):
async with r.monitor() as m:
await r.get('foo"bar')
response = await wait_for_command(r, m, 'GET foo"bar')
assert response["command"] == 'GET foo"bar'
async def test_command_with_binary_data(self, r):
async with r.monitor() as m:
byte_string = b"foo\x92"
await r.get(byte_string)
response = await wait_for_command(r, m, "GET foo\\x92")
assert response["command"] == "GET foo\\x92"
async def test_command_with_escaped_data(self, r):
async with r.monitor() as m:
byte_string = b"foo\\x92"
await r.get(byte_string)
response = await wait_for_command(r, m, "GET foo\\\\x92")
assert response["command"] == "GET foo\\\\x92"
@skip_if_redis_enterprise()
async def test_lua_script(self, r):
async with r.monitor() as m:
script = 'return redis.call("GET", "foo")'
assert await r.eval(script, 0) is None
response = await wait_for_command(r, m, "GET foo")
assert response["command"] == "GET foo"
assert response["client_type"] == "lua"
assert response["client_address"] == "lua"
assert response["client_port"] == ""
@skip_ifnot_redis_enterprise()
async def test_lua_script_in_enterprise(self, r):
async with r.monitor() as m:
script = 'return redis.call("GET", "foo")'
assert await r.eval(script, 0) is None
response = await wait_for_command(r, m, "GET foo")
assert response is None
| TestMonitor |
python | numba__numba | numba/core/types/function_type.py | {
"start": 226,
"end": 3186
} | class ____(Type):
"""
First-class function type.
"""
cconv = None
def __init__(self, signature):
sig = types.unliteral(signature)
self.nargs = len(sig.args)
self.signature = sig
self.ftype = FunctionPrototype(sig.return_type, sig.args)
self._key = self.ftype.key
@property
def key(self):
return self._key
@property
def name(self):
return f'{type(self).__name__}[{self.key}]'
def is_precise(self):
return self.signature.is_precise()
def get_precise(self):
return self
def dump(self, tab=''):
print(f'{tab}DUMP {type(self).__name__}[code={self._code}]')
self.signature.dump(tab=tab + ' ')
print(f'{tab}END DUMP {type(self).__name__}')
def get_call_type(self, context, args, kws):
from numba.core import typing
if kws:
# First-class functions carry only the type signature
# information and function address value. So, it is not
# possible to determine the positional arguments
# corresponding to the keyword arguments in the call
# expression. For instance, the definition of the
# first-class function may not use the same argument names
# that the caller assumes. [numba/issues/5540].
raise errors.UnsupportedError(
'first-class function call cannot use keyword arguments')
if len(args) != self.nargs:
raise ValueError(
f'mismatch of arguments number: {len(args)} vs {self.nargs}')
sig = self.signature
# check that arguments types match with the signature types exactly
for atype, sig_atype in zip(args, sig.args):
atype = types.unliteral(atype)
if sig_atype.is_precise():
conv_score = context.context.can_convert(
fromty=atype, toty=sig_atype
)
if conv_score is None \
or conv_score > typing.context.Conversion.safe:
raise ValueError(
f'mismatch of argument types: {atype} vs {sig_atype}')
if not sig.is_precise():
for dispatcher in self.dispatchers:
template, pysig, args, kws \
= dispatcher.get_call_template(args, kws)
new_sig = template(context.context).apply(args, kws)
return types.unliteral(new_sig)
return sig
def check_signature(self, other_sig):
"""Return True if signatures match (up to being precise).
"""
sig = self.signature
return (self.nargs == len(other_sig.args)
and (sig == other_sig or not sig.is_precise()))
def unify(self, context, other):
if isinstance(other, types.UndefinedFunctionType) \
and self.nargs == other.nargs:
return self
| FunctionType |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {
"start": 5283,
"end": 5630
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read", "org:write", "org:admin", "org:integrations"],
"POST": ["org:write", "org:admin", "org:integrations"],
"PUT": ["org:write", "org:admin", "org:integrations"],
"DELETE": ["org:admin", "org:integrations"],
}
| OrganizationIntegrationsPermission |
python | apache__thrift | lib/py/src/transport/TSSLSocket.py | {
"start": 1178,
"end": 7851
} | class ____(object):
# SSLContext is not available for Python < 2.7.9
_has_ssl_context = sys.hexversion >= 0x020709F0
# ciphers argument is not available for Python < 2.7.0
_has_ciphers = sys.hexversion >= 0x020700F0
# For python >= 2.7.9, use latest TLS that both client and server
# supports.
# SSL 2.0 and 3.0 are disabled via ssl.OP_NO_SSLv2 and ssl.OP_NO_SSLv3.
# For python < 2.7.9, use TLS 1.0 since TLSv1_X nor OP_NO_SSLvX is
# unavailable.
# For python < 3.6, use SSLv23 since TLS is not available
if sys.version_info < (3, 6):
_default_protocol = ssl.PROTOCOL_SSLv23 if _has_ssl_context else \
ssl.PROTOCOL_TLSv1
else:
_default_protocol = ssl.PROTOCOL_TLS_CLIENT if _has_ssl_context else \
ssl.PROTOCOL_TLSv1
def _init_context(self, ssl_version):
if self._has_ssl_context:
self._context = ssl.SSLContext(ssl_version)
if self._context.protocol == ssl.PROTOCOL_SSLv23:
self._context.options |= ssl.OP_NO_SSLv2
self._context.options |= ssl.OP_NO_SSLv3
else:
self._context = None
self._ssl_version = ssl_version
@property
def _should_verify(self):
if self._has_ssl_context:
return self._context.verify_mode != ssl.CERT_NONE
else:
return self.cert_reqs != ssl.CERT_NONE
@property
def ssl_version(self):
if self._has_ssl_context:
return self.ssl_context.protocol
else:
return self._ssl_version
@property
def ssl_context(self):
return self._context
SSL_VERSION = _default_protocol
"""
Default SSL version.
For backwards compatibility, it can be modified.
Use __init__ keyword argument "ssl_version" instead.
"""
def _deprecated_arg(self, args, kwargs, pos, key):
if len(args) <= pos:
return
real_pos = pos + 3
warnings.warn(
'%dth positional argument is deprecated.'
'please use keyword argument instead.'
% real_pos, DeprecationWarning, stacklevel=3)
if key in kwargs:
raise TypeError(
'Duplicate argument: %dth argument and %s keyword argument.'
% (real_pos, key))
kwargs[key] = args[pos]
def _unix_socket_arg(self, host, port, args, kwargs):
key = 'unix_socket'
if host is None and port is None and len(args) == 1 and key not in kwargs:
kwargs[key] = args[0]
return True
return False
def __getattr__(self, key):
if key == 'SSL_VERSION':
warnings.warn(
'SSL_VERSION is deprecated.'
'please use ssl_version attribute instead.',
DeprecationWarning, stacklevel=2)
return self.ssl_version
def __init__(self, server_side, host, ssl_opts):
self._server_side = server_side
if TSSLBase.SSL_VERSION != self._default_protocol:
warnings.warn(
'SSL_VERSION is deprecated.'
'please use ssl_version keyword argument instead.',
DeprecationWarning, stacklevel=2)
self._context = ssl_opts.pop('ssl_context', None)
self._server_hostname = None
if not self._server_side:
self._server_hostname = ssl_opts.pop('server_hostname', host)
if self._context:
self._custom_context = True
if ssl_opts:
raise ValueError(
'Incompatible arguments: ssl_context and %s'
% ' '.join(ssl_opts.keys()))
if not self._has_ssl_context:
raise ValueError(
'ssl_context is not available for this version of Python')
else:
self._custom_context = False
ssl_version = ssl_opts.pop('ssl_version', TSSLBase.SSL_VERSION)
self._init_context(ssl_version)
self.cert_reqs = ssl_opts.pop('cert_reqs', ssl.CERT_REQUIRED)
self.ca_certs = ssl_opts.pop('ca_certs', None)
self.keyfile = ssl_opts.pop('keyfile', None)
self.certfile = ssl_opts.pop('certfile', None)
self.ciphers = ssl_opts.pop('ciphers', None)
if ssl_opts:
raise ValueError(
'Unknown keyword arguments: ', ' '.join(ssl_opts.keys()))
if self._should_verify:
if not self.ca_certs:
raise ValueError(
'ca_certs is needed when cert_reqs is not ssl.CERT_NONE')
if not os.access(self.ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (self.ca_certs))
@property
def certfile(self):
return self._certfile
@certfile.setter
def certfile(self, certfile):
if self._server_side and not certfile:
raise ValueError('certfile is needed for server-side')
if certfile and not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self._certfile = certfile
def _wrap_socket(self, sock):
if self._has_ssl_context:
if not self._custom_context:
self.ssl_context.verify_mode = self.cert_reqs
if self.certfile:
self.ssl_context.load_cert_chain(self.certfile,
self.keyfile)
if self.ciphers:
self.ssl_context.set_ciphers(self.ciphers)
if self.ca_certs:
self.ssl_context.load_verify_locations(self.ca_certs)
return self.ssl_context.wrap_socket(
sock, server_side=self._server_side,
server_hostname=self._server_hostname)
else:
ssl_opts = {
'ssl_version': self._ssl_version,
'server_side': self._server_side,
'ca_certs': self.ca_certs,
'keyfile': self.keyfile,
'certfile': self.certfile,
'cert_reqs': self.cert_reqs,
}
if self.ciphers:
if self._has_ciphers:
ssl_opts['ciphers'] = self.ciphers
else:
logger.warning(
'ciphers is specified but ignored due to old Python version')
return ssl.wrap_socket(sock, **ssl_opts)
| TSSLBase |
python | kamyu104__LeetCode-Solutions | Python/evaluate-boolean-binary-tree.py | {
"start": 139,
"end": 1216
} | class ____(object):
def evaluateTree(self, root):
"""
:type root: Optional[TreeNode]
:rtype: bool
"""
INF = float("inf")
OP = {
2: lambda x, y: x or y,
3: lambda x, y: x and y
}
def iter_dfs(root):
ret = [0]
stk = [(1, (root, ret))]
while stk:
step, args = stk.pop()
if step == 1:
node, ret = args
if node.left == node.right:
ret[0] = node.val
continue
ret1, ret2 = [0], [0]
stk.append((2, (node, ret1, ret2, ret)))
stk.append((1, (node.right, ret2)))
stk.append((1, (node.left, ret1)))
elif step == 2:
node, ret1, ret2, ret = args
ret[0] = OP[node.val](ret1[0], ret2[0])
return ret[0]
return iter_dfs(root)
# Time: O(n)
# Space: O(h)
# dfs with recursion
| Solution |
python | django__django | tests/model_fields/models.py | {
"start": 4463,
"end": 4535
} | class ____(models.Model):
field = models.DurationField()
| DurationModel |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ItemGroup.py | {
"start": 92,
"end": 504
} | class ____(GraphicsObject):
"""
Replacement for QGraphicsItemGroup
"""
def __init__(self, *args):
GraphicsObject.__init__(self, *args)
self.setFlag(self.GraphicsItemFlag.ItemHasNoContents)
def boundingRect(self):
return QtCore.QRectF()
def paint(self, *args):
pass
def addItem(self, item):
item.setParentItem(self)
| ItemGroup |
python | rapidsai__cudf | python/cudf/cudf/core/udf/groupby_typing.py | {
"start": 1327,
"end": 2472
} | class ____(numba.types.Type):
"""
Numba extension type carrying metadata associated with a single
GroupBy group. This metadata ultimately is passed to the CUDA
__device__ function which actually performs the work.
"""
def __init__(self, group_scalar_type, index_type=index_default_type):
if (
group_scalar_type not in SUPPORTED_GROUPBY_NUMBA_TYPES
and not isinstance(group_scalar_type, types.Poison)
):
# A frame containing an column with an unsupported dtype
# is calling groupby apply. Construct a GroupType with
# a poisoned type so we can later error if this group is
# used in the UDF body
group_scalar_type = types.Poison(group_scalar_type)
self.group_scalar_type = group_scalar_type
self.index_type = index_type
self.group_data_type = types.CPointer(group_scalar_type)
self.group_size_type = group_size_type
self.group_index_type = types.CPointer(index_type)
super().__init__(
name=f"Group({self.group_scalar_type}, {self.index_type})"
)
| GroupType |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 35836,
"end": 36030
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("DISMISSED", "FIXED", "OPEN")
| RepositoryVulnerabilityAlertState |
python | getsentry__sentry | src/sentry/api/endpoints/organization_tagkey_values.py | {
"start": 884,
"end": 3584
} | class ____(OrganizationEventsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization, key) -> Response:
if not TAG_KEY_RE.match(key):
return Response({"detail": f'Invalid tag key format for "{key}"'}, status=400)
sentry_sdk.set_tag("query.tag_key", key)
dataset = None
if request.GET.get("dataset"):
try:
dataset = Dataset(request.GET.get("dataset"))
sentry_sdk.set_tag("dataset", dataset.value)
except ValueError:
raise ParseError(detail="Invalid dataset parameter")
elif request.GET.get("includeTransactions") == "1":
sentry_sdk.set_tag("dataset", Dataset.Discover.value)
elif request.GET.get("includeReplays") == "1":
sentry_sdk.set_tag("dataset", Dataset.Replays.value)
else:
sentry_sdk.set_tag("dataset", Dataset.Events.value)
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
paginator: SequencePaginator[TagValue] = SequencePaginator([])
else:
with handle_query_errors():
# Flags are stored on the same table as tags but on a different column. Ideally
# both could be queried in a single request. But at present we're not sure if we
# want to treat tags and flags as the same or different and in which context.
if request.GET.get("useFlagsBackend") == "1":
backend = tagstore.flag_backend
else:
backend = tagstore.backend
paginator = backend.get_tag_value_paginator_for_projects(
snuba_params.project_ids,
snuba_params.environment_ids,
key,
snuba_params.start_date,
snuba_params.end_date,
dataset=dataset,
query=request.GET.get("query"),
order_by=validate_sort_field(request.GET.get("sort", "-last_seen")),
include_transactions=request.GET.get("includeTransactions") == "1",
include_sessions=request.GET.get("includeSessions") == "1",
include_replays=request.GET.get("includeReplays") == "1",
tenant_ids={"organization_id": organization.id},
)
return self.paginate(
request=request,
paginator=paginator,
on_results=lambda results: serialize(results, request.user),
)
| OrganizationTagKeyValuesEndpoint |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 665,
"end": 829
} | class ____(GenericModel, Generic[ResponseFormatT]):
type: Literal["content.done"]
content: str
parsed: Optional[ResponseFormatT] = None
| ContentDoneEvent |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 119231,
"end": 120673
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(9799340796)
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50), random_state=self.rng)
assert np.all(vals >= 0)
assert np.shape(vals) == (2, 50)
assert vals.dtype.char in typecodes['AllInteger']
val = stats.poisson.rvs(0.5, random_state=self.rng)
assert isinstance(val, int)
val = stats.poisson(0.5).rvs(3, random_state=self.rng)
assert isinstance(val, np.ndarray)
assert val.dtype.char in typecodes['AllInteger']
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
| TestPoisson |
python | Netflix__metaflow | test/core/tests/task_exception.py | {
"start": 67,
"end": 981
} | class ____(MetaflowTest):
"""
A test to validate if exceptions are stored and retrieved correctly
"""
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
SHOULD_FAIL = True
@steps(0, ["singleton-end"], required=True)
def step_start(self):
raise KeyError("Something has gone wrong")
@steps(2, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is not None:
for task in run["end"]:
assert_equals("KeyError" in str(task.exception), True)
assert_equals(task.exception.exception, "'Something has gone wrong'")
| TaskExceptionTest |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni/objects.py | {
"start": 2914,
"end": 3313
} | class ____:
id: str
name: str
query_config: OmniQueryConfig
@classmethod
def from_json(cls, data: dict[str, Any]) -> "OmniQuery":
"""Create OmniQuery from JSON response data."""
return cls(
id=data["id"],
name=data["name"],
query_config=OmniQueryConfig.from_json(data["query"]),
)
@whitelist_for_serdes
@record
| OmniQuery |
python | huggingface__transformers | tests/models/git/test_modeling_git.py | {
"start": 1425,
"end": 4403
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=32,
patch_size=16,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return GitVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = GitVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| GitVisionModelTester |
python | pytorch__pytorch | torch/distributed/tensor/parallel/style.py | {
"start": 18353,
"end": 26080
} | class ____(ParallelStyle):
"""
Configure the nn.Module's inputs to convert the input tensors of the nn.Module to DTensors at runtime according to
``input_layouts``, and perform layout redistribution according to the ``desired_input_layouts``.
Keyword Args:
input_layouts (Union[Placement, Tuple[Optional[Placement]]]):
The DTensor layouts of input tensors for the nn.Module, this is used to convert the input tensors to
DTensors. If some inputs are not torch.Tensor or no need to convert to DTensors, ``None`` need to be specified
as a placeholder. default: None.
desired_input_layouts (Union[Placement, Tuple[Optional[Placement]]]):
The desired DTensor layout of input tensors for the nn.Module, this is used to ensure the inputs of the nn.Module
have the desired DTensor layouts. This argument needs to have the same length with ``input_layouts``. default: None.
input_kwarg_layouts (Dict[str, Placement]):
The DTensor layouts of input kwargs for the nn.Module, this is used to convert the input kwarg tensors to DTensors.
default: None
desired_input_kwarg_layouts: (Dict[str, Placement]):
The desired DTensor layout of input kwargs for the nn.Module, this is used to ensure the inputs of the nn.Module
have the desired DTensor layouts. default: None.
use_local_output (bool, optional):
Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module inputs, default: False.
Returns:
A :class:`ParallelStyle` object that prepares the sharding layouts of the nn.Module's inputs.
Example::
>>> # xdoctest: +SKIP(failing)
>>> from torch.distributed.tensor.parallel import parallelize_module, PrepareModuleInput
>>> from torch.distributed.device_mesh import init_device_mesh
>>> ...
>>> block = TransformerBlock(...) # block is a nn.Module that contains an "attn" Attention submodule
>>> tp_mesh = init_device_mesh("cuda", (8,))
>>>
>>> # According to the style specified below, the first input of attn will be annotated to Sharded DTensor
>>> # and then redistributed to Replicated DTensor.
>>> parallelize_module(
>>> block, # this can be a submodule or module
>>> tp_mesh,
>>> parallelize_plan={
>>> "attn": PrepareModuleInput(
>>> input_layouts=(Shard(0), None, None, ...),
>>> desired_input_layouts=(Replicate(), None, None, ...)
>>> ),
>>> }
>>> )
"""
def __init__(
self,
*,
input_layouts: Placement | tuple[Placement | None, ...] | None = None,
desired_input_layouts: Placement | tuple[Placement | None, ...] | None = None,
input_kwarg_layouts: dict[str, Placement] | None = None,
desired_input_kwarg_layouts: dict[str, Placement] | None = None,
use_local_output: bool = False,
):
self.input_layouts = (
(input_layouts,) if isinstance(input_layouts, Placement) else input_layouts
)
self.desired_input_layouts = (
(desired_input_layouts,)
if isinstance(desired_input_layouts, Placement)
else desired_input_layouts
)
self.use_local_output = use_local_output
if self.input_layouts is not None:
assert self.desired_input_layouts is not None, (
"desired module inputs should not be None!"
)
assert len(self.input_layouts) == len(self.desired_input_layouts), (
"input_layouts and desired_input_layouts should have same length!"
)
self.with_kwargs = input_kwarg_layouts is not None
self.input_kwarg_layouts = input_kwarg_layouts or {}
self.desired_input_kwarg_layouts = desired_input_kwarg_layouts or {}
if self.with_kwargs:
assert len(self.input_kwarg_layouts) == len(
self.desired_input_kwarg_layouts
), (
"input_kwarg_layouts and desired_input_kwarg_layouts should have same length!"
)
def _prepare_input_arg(
self,
input: Any,
mesh: DeviceMesh,
input_layout: Placement | None,
desired_layout: Placement | None,
):
if input_layout is not None:
if isinstance(input, DTensor):
# TODO: re-enable the check once we fix the compile path
# assert inp.placements[0] == input_layout
dt_inp = input
else:
assert isinstance(input, torch.Tensor), (
"expecting input to be a torch.Tensor!"
)
dt_inp = DTensor.from_local(
input, mesh, (input_layout,), run_check=False
)
if desired_layout is not None and input_layout != desired_layout:
dt_inp = dt_inp.redistribute(placements=(desired_layout,))
return dt_inp.to_local() if self.use_local_output else dt_inp
else:
return input
def _prepare_input_fn(self, inputs, device_mesh):
if self.input_layouts is None:
return inputs
prepared_inputs = []
if not isinstance(inputs, tuple):
inputs = (inputs,)
if len(inputs) != len(self.input_layouts):
raise ValueError("module inputs and input_layouts should have same length!")
assert self.desired_input_layouts is not None, (
"desired module inputs should not be None!"
)
for inp, input_layout, desired_layout in zip(
inputs, self.input_layouts, self.desired_input_layouts
):
prepared_inputs.append(
self._prepare_input_arg(inp, device_mesh, input_layout, desired_layout)
)
return tuple(prepared_inputs)
def _prepare_input_kwarg_fn(self, inputs, kwarg_inputs, device_mesh):
prepared_arg_inputs = self._prepare_input_fn(inputs, device_mesh)
prepared_kwarg_inputs = {}
for kwarg_key in kwarg_inputs:
kwarg_val = kwarg_inputs[kwarg_key]
input_layout = self.input_kwarg_layouts.get(kwarg_key)
desired_input_layout = self.desired_input_kwarg_layouts.get(kwarg_key)
prepared_kwarg_inputs[kwarg_key] = self._prepare_input_arg(
kwarg_val, device_mesh, input_layout, desired_input_layout
)
return (prepared_arg_inputs, prepared_kwarg_inputs)
def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module:
if self.with_kwargs:
module.register_forward_pre_hook(
lambda _, inputs, kwargs: self._prepare_input_kwarg_fn(
inputs, kwargs, device_mesh
),
with_kwargs=True,
) # type: ignore[misc]
else:
module.register_forward_pre_hook(
lambda _, inputs: self._prepare_input_fn(inputs, device_mesh)
) # type: ignore[misc, call-arg]
return module
def __repr__(self) -> str:
tmpstr = self.__class__.__name__ + "("
tmpstr += f"input_layouts={self.input_layouts}, "
tmpstr += f"desired_input_layouts={self.desired_input_layouts}, "
tmpstr += f"input_kwarg_layouts={self.input_kwarg_layouts}, "
tmpstr += f"desired_input_kwarg_layouts={self.desired_input_kwarg_layouts}, "
tmpstr += f"use_local_output={self.use_local_output}"
tmpstr += ")"
return tmpstr
| PrepareModuleInput |
python | openai__openai-python | src/openai/cli/_errors.py | {
"start": 196,
"end": 471
} | class ____(CLIError): ...
def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
if isinstance(err, SilentCLIError):
return
sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err))
| SilentCLIError |
python | numba__numba | numba/tests/test_builtins.py | {
"start": 35626,
"end": 44239
} | class ____(TestCase):
def test_isinstance(self):
pyfunc = isinstance_usecase
cfunc = jit(nopython=True)(pyfunc)
inputs = (
3, # int
5.0, # float
"Hello", # string
b'world', # bytes
1j, # complex
[1, 2, 3], # list
(1, 3, 3, 3), # UniTuple
set([1, 2]), # set
(1, 'nba', 2), # Heterogeneous Tuple
# {'hello': 2}, # dict - doesn't work as input
None,
)
for inpt in inputs:
expected = pyfunc(inpt)
got = cfunc(inpt)
self.assertEqual(expected, got)
def test_isinstance_dict(self):
# Tests typed.Dict and LiteralStrKeyDict
pyfunc = isinstance_dict
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_isinstance_issue9125(self):
pyfunc = invalid_isinstance_usecase_phi_nopropagate2
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(3), cfunc(3))
def test_isinstance_numba_types(self):
# This makes use of type aliasing between python scalars and NumPy
# scalars, see also test_numba_types()
pyfunc = isinstance_usecase_numba_types
cfunc = jit(nopython=True)(pyfunc)
inputs = (
(types.int32(1), 'int32'),
(types.int64(2), 'int64'),
(types.float32(3.0), 'float32'),
(types.float64(4.0), 'float64'),
(types.complex64(5j), 'no match'),
(typed.List([1, 2]), 'typed list'),
(typed.Dict.empty(types.int64, types.int64), 'typed dict')
)
for inpt, expected in inputs:
got = cfunc(inpt)
self.assertEqual(expected, got)
def test_isinstance_numba_types_2(self):
pyfunc = isinstance_usecase_numba_types_2
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_isinstance_invalid_type(self):
pyfunc = isinstance_usecase_invalid_type
cfunc = jit(nopython=True)(pyfunc)
# valid type
self.assertTrue(cfunc(3.4))
# invalid type
msg = 'Cannot infer Numba type of Python type'
with self.assertRaises(errors.TypingError) as raises:
cfunc(100)
self.assertIn(msg, str(raises.exception))
def test_isinstance_exceptions(self):
fns = [
(invalid_isinstance_usecase,
'Cannot infer Numba type of Python type'),
(invalid_isinstance_usecase_phi_nopropagate,
('isinstance() cannot determine the type of variable "z" due to a '
'branch.')),
(invalid_isinstance_optional_usecase,
('isinstance() cannot determine the type of variable "z" due to a '
'branch.')),
(invalid_isinstance_unsupported_type_usecase(),
('isinstance() does not support variables of type "ntpl(')),
]
for fn, msg in fns:
fn = njit(fn)
with self.assertRaises(errors.TypingError) as raises:
fn(100)
self.assertIn(msg, str(raises.exception))
def test_combinations(self):
# Combinatorically test common classes and instances
def gen_w_arg(clazz_type):
def impl(x):
return isinstance(x, clazz_type)
return impl
clazz_types = (int, float, complex, str, list, tuple, bytes, set, range,
np.int8, np.float32,)
instances = (1, 2.3, 4j, '5', [6,], (7,), b'8', {9,}, None,
(10, 11, 12), (13, 'a', 14j), np.array([15, 16, 17]),
np.int8(18), np.float32(19),
typed.Dict.empty(types.unicode_type, types.float64),
typed.List.empty_list(types.complex128), np.ones(4))
for ct in clazz_types:
fn = njit(gen_w_arg(ct))
for x in instances:
expected = fn.py_func(x)
got = fn(x)
self.assertEqual(got, expected)
def test_numba_types(self):
# Check types which are Numba types, this would break without the jit
# decorator in all cases except numba.typed containers.
def gen_w_arg(clazz_type):
def impl():
return isinstance(1, clazz_type)
return impl
clazz_types = (types.Integer, types.Float, types.Array,)
msg = "Numba type classes.*are not supported"
for ct in clazz_types:
fn = njit(gen_w_arg(ct))
with self.assertRaises(errors.TypingError) as raises:
fn()
self.assertRegex(str(raises.exception), msg)
def test_python_numpy_scalar_alias_problem(self):
# There's a problem due to Python and NumPy scalars being aliased in the
# type system. This is because e.g. int scalar values and NumPy np.intp
# type alias to types.intp. This test merely records this fact.
@njit
def foo():
return isinstance(np.intp(10), int)
self.assertEqual(foo(), True)
self.assertEqual(foo.py_func(), False)
@njit
def bar():
return isinstance(1, np.intp)
self.assertEqual(bar(), True)
self.assertEqual(bar.py_func(), False)
def test_branch_prune(self):
# Check that isinstance branches are pruned allowing otherwise
# impossible type specific specialisation.
@njit
def foo(x):
if isinstance(x, str):
return x + 'some_string'
elif isinstance(x, complex):
return np.imag(x)
elif isinstance(x, tuple):
return len(x)
else:
assert 0
for x in ('string', 1 + 2j, ('a', 3, 4j)):
expected = foo.py_func(x)
got = foo(x)
self.assertEqual(got, expected)
def test_branch_prune_and_bind_to_sig(self):
# see issue 9795
@register_jitable
def f(x, y):
return x + y
@njit
def call_f(x):
if isinstance(x, tuple):
return f(*x)
else:
return f(x)
# The issue is that without isinstance and branch pruning working
# correctly, an attempt will be made to bind the function `f` with
# argument `x`. If `x` is a Tuple type, this will fail on the `else`
# branch as `f` takes two arguments opposed to one.
x = (1, 2)
self.assertEqual(call_f(x), call_f.py_func(x))
# This should raise as partial type inference and branch pruning will
# remove the `f(*x)` branch and just leave `f(x)`, which then won't
# bind because `f` takes two arguments and only one is supplied.
with self.assertRaises(errors.TypingError) as raises:
call_f(1)
msg = str(raises.exception)
self.assertIn("Cannot bind", msg)
self.assertIn("TypeError: missing a required argument: 'y'", msg)
def test_branch_prune_non_tuples_as_star_arg(self):
# see issue 9795
@register_jitable
def f(x, y):
return x + y
@register_jitable
def g(x):
return x
@njit
def call_f(x):
if isinstance(x, tuple):
return f(*x)
else:
return g(x)
# The issue is that without isinstance and branch pruning working
# correctly, an attempt will be made to bind the function `f` with
# argument `x`. If `x` is a non-tuple type `*x` will not bind to the
# signature of `f`.
x = 1
self.assertEqual(call_f(x), call_f.py_func(x))
def test_branch_prune_literal_as_star_arg(self):
# see issue 9795
@register_jitable
def f(x, y):
return x + y
@register_jitable
def g(x):
return x
one = 1
@njit
def call_f():
x = one
if isinstance(x, tuple):
return f(*x)
else:
return g(x)
# The issue is that without isinstance and branch pruning working
# correctly, an attempt will be made to bind the function `f` with
# argument `x`. If `x` is a non-tuple const value type `*x` will not
# bind to the signature of `f`.
self.assertEqual(call_f(), call_f.py_func())
| TestIsinstanceBuiltin |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_radar01.py | {
"start": 315,
"end": 1345
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_radar01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "radar"})
chart.axis_ids = [56801152, 56802688]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numba__numba | numba/tests/test_builtins.py | {
"start": 6918,
"end": 34449
} | class ____(TestCase):
def run_nullary_func(self, pyfunc, flags):
cfunc = jit((), **flags)(pyfunc)
expected = pyfunc()
self.assertPreciseEqual(cfunc(), expected)
def test_abs(self, flags=forceobj_flags):
pyfunc = abs_usecase
cfunc = jit((types.int32,), **flags)(pyfunc)
for x in [-1, 0, 1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
cfunc = jit((types.float32,), **flags)(pyfunc)
for x in [-1.1, 0.0, 1.1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single')
complex_values = [-1.1 + 0.5j, 0.0 + 0j, 1.1 + 3j,
float('inf') + 1j * float('nan'),
float('nan') - 1j * float('inf')]
cfunc = jit((types.complex64,), **flags)(pyfunc)
for x in complex_values:
self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single')
cfunc = jit((types.complex128,), **flags)(pyfunc)
for x in complex_values:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
for unsigned_type in types.unsigned_domain:
unsigned_values = [0, 10, 2, 2 ** unsigned_type.bitwidth - 1]
cfunc = jit((unsigned_type,), **flags)(pyfunc)
for x in unsigned_values:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_abs_npm(self):
self.test_abs(flags=no_pyobj_flags)
def test_all(self, flags=forceobj_flags):
pyfunc = all_usecase
cfunc = jit((types.int32,types.int32), **flags)(pyfunc)
x_operands = [-1, 0, 1, None]
y_operands = [-1, 0, 1, None]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_all_npm(self):
with self.assertTypingError():
self.test_all(flags=no_pyobj_flags)
def test_any(self, flags=forceobj_flags):
pyfunc = any_usecase
cfunc = jit((types.int32,types.int32), **flags)(pyfunc)
x_operands = [-1, 0, 1, None]
y_operands = [-1, 0, 1, None]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_any_npm(self):
with self.assertTypingError():
self.test_any(flags=no_pyobj_flags)
def test_bool(self, flags=forceobj_flags):
pyfunc = bool_usecase
cfunc = jit((types.int32,), **flags)(pyfunc)
for x in [-1, 0, 1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
cfunc = jit((types.float64,), **flags)(pyfunc)
for x in [0.0, -0.0, 1.5, float('inf'), float('nan')]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
cfunc = jit((types.complex128,), **flags)(pyfunc)
for x in [complex(0, float('inf')), complex(0, float('nan'))]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_bool_npm(self):
self.test_bool(flags=no_pyobj_flags)
def test_bool_nonnumber(self, flags=forceobj_flags):
pyfunc = bool_usecase
cfunc = jit((types.string,), **flags)(pyfunc)
for x in ['x', '']:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
cfunc = jit((types.Dummy('list'),), **flags)(pyfunc)
for x in [[1], []]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_bool_nonnumber_npm(self):
with self.assertTypingError():
self.test_bool_nonnumber(flags=no_pyobj_flags)
def test_complex(self, flags=forceobj_flags):
pyfunc = complex_usecase
cfunc = jit((types.int32, types.int32), **flags)(pyfunc)
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_complex_npm(self):
self.test_complex(flags=no_pyobj_flags)
def test_divmod_ints(self, flags=forceobj_flags):
pyfunc = divmod_usecase
cfunc = jit((types.int64, types.int64), **flags)(pyfunc)
def truncate_result(x, bits=64):
# Remove any extraneous bits (since Numba will return
# a 64-bit result by definition)
if x >= 0:
x &= (1 << (bits - 1)) - 1
return x
denominators = [1, 3, 7, 15, -1, -3, -7, -15, 2**63 - 1, -2**63]
numerators = denominators + [0]
for x, y, in itertools.product(numerators, denominators):
expected_quot, expected_rem = pyfunc(x, y)
quot, rem = cfunc(x, y)
f = truncate_result
self.assertPreciseEqual((f(quot), f(rem)),
(f(expected_quot), f(expected_rem)))
for x in numerators:
with self.assertRaises(ZeroDivisionError):
cfunc(x, 0)
def test_divmod_ints_npm(self):
self.test_divmod_ints(flags=no_pyobj_flags)
def test_divmod_floats(self, flags=forceobj_flags):
pyfunc = divmod_usecase
cfunc = jit((types.float64, types.float64), **flags)(pyfunc)
denominators = [1., 3.5, 1e100, -2., -7.5, -1e101,
np.inf, -np.inf, np.nan]
numerators = denominators + [-0.0, 0.0]
for x, y, in itertools.product(numerators, denominators):
expected_quot, expected_rem = pyfunc(x, y)
quot, rem = cfunc(x, y)
self.assertPreciseEqual((quot, rem), (expected_quot, expected_rem))
for x in numerators:
with self.assertRaises(ZeroDivisionError):
cfunc(x, 0.0)
def test_divmod_floats_npm(self):
self.test_divmod_floats(flags=no_pyobj_flags)
def test_enumerate(self, flags=forceobj_flags):
self.run_nullary_func(enumerate_usecase, flags)
def test_enumerate_npm(self):
self.test_enumerate(flags=no_pyobj_flags)
def test_enumerate_start(self, flags=forceobj_flags):
self.run_nullary_func(enumerate_start_usecase, flags)
def test_enumerate_start_npm(self):
self.test_enumerate_start(flags=no_pyobj_flags)
def test_enumerate_start_invalid_start_type(self):
pyfunc = enumerate_invalid_start_usecase
cfunc = jit((), **forceobj_flags)(pyfunc)
with self.assertRaises(TypeError) as raises:
cfunc()
msg = "'float' object cannot be interpreted as an integer"
self.assertIn(msg, str(raises.exception))
def test_enumerate_start_invalid_start_type_npm(self):
pyfunc = enumerate_invalid_start_usecase
with self.assertRaises(errors.TypingError) as raises:
jit((), **no_pyobj_flags)(pyfunc)
msg = "Only integers supported as start value in enumerate"
self.assertIn(msg, str(raises.exception))
def test_filter(self, flags=forceobj_flags):
pyfunc = filter_usecase
argtys = (types.Dummy('list'), types.Dummy('function_ptr'))
cfunc = jit(argtys, **flags)(pyfunc)
filter_func = lambda x: x % 2
x = [0, 1, 2, 3, 4]
self.assertSequenceEqual(list(cfunc(x, filter_func)),
list(pyfunc(x, filter_func)))
def test_filter_npm(self):
with self.assertTypingError():
self.test_filter(flags=no_pyobj_flags)
def test_float(self, flags=forceobj_flags):
pyfunc = float_usecase
cfunc = jit((types.int32,), **flags)(pyfunc)
for x in [-1, 0, 1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
cfunc = jit((types.float32,), **flags)(pyfunc)
for x in [-1.1, 0.0, 1.1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x), prec='single')
cfunc = jit((types.string,), **flags)(pyfunc)
for x in ['-1.1', '0.0', '1.1', 'inf', '-inf', 'INF', '-INF']:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_float_npm(self):
with self.assertTypingError():
self.test_float(flags=no_pyobj_flags)
def test_float_string_literal(self):
pyfunc = float_inf_usecase
cfunc = njit(pyfunc)
for x in range(8):
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_format(self, flags=forceobj_flags):
pyfunc = format_usecase
cfunc = jit((types.string, types.int32,), **flags)(pyfunc)
x = '{0}'
for y in [-1, 0, 1]:
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
cfunc = jit((types.string, types.float32,), **flags)(pyfunc)
x = '{0}'
for y in [-1.1, 0.0, 1.1]:
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
cfunc = jit((types.string, types.string,), **flags)(pyfunc)
x = '{0}'
for y in ['a', 'b', 'c']:
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_format_npm(self):
with self.assertTypingError():
self.test_format(flags=no_pyobj_flags)
def test_globals(self, flags=forceobj_flags):
pyfunc = globals_usecase
cfunc = jit((), **flags)(pyfunc)
g = cfunc()
self.assertIs(g, globals())
def test_globals_npm(self):
with self.assertTypingError():
self.test_globals(flags=no_pyobj_flags)
def test_globals_jit(self, flags=forceobj_flags):
# Issue #416: weird behaviour of globals() in combination with
# the @jit decorator.
pyfunc = globals_usecase
jitted = jit(**flags)(pyfunc)
self.assertIs(jitted(), globals())
self.assertIs(jitted(), globals())
def test_globals_jit_npm(self):
with self.assertTypingError():
self.test_globals_jit(nopython=True)
def test_hex(self, flags=forceobj_flags):
pyfunc = hex_usecase
cfunc = jit((types.int32,), **flags)(pyfunc)
for x in [-1, 0, 1]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_hex_npm(self):
with self.assertTypingError():
self.test_hex(flags=no_pyobj_flags)
def test_int_str(self):
pyfunc = str_usecase
small_inputs = [
1234,
1,
0,
10,
1000,
]
large_inputs = [
123456789,
2222222,
1000000,
~0x0
]
args = [*small_inputs, *large_inputs]
typs = [
types.int8,
types.int16,
types.int32,
types.int64,
types.uint,
types.uint8,
types.uint16,
types.uint32,
types.uint64,
]
for typ in typs:
cfunc = jit((typ,), **nrt_no_pyobj_flags)(pyfunc)
for v in args:
tp_info = np.iinfo(typ.key)
if not (tp_info.min <= v <= tp_info.max):
continue
self.assertPreciseEqual(cfunc(typ(v)), pyfunc(typ(v)))
if typ.signed:
self.assertPreciseEqual(cfunc(typ(-v)), pyfunc(typ(-v)))
def test_int(self, flags=forceobj_flags):
pyfunc = int_usecase
cfunc = jit((types.string, types.int32,), **flags)(pyfunc)
x_operands = ['-1', '0', '1', '10']
y_operands = [2, 8, 10, 16]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_int_npm(self):
with self.assertTypingError():
self.test_int(flags=no_pyobj_flags)
def test_iter_next(self, flags=forceobj_flags):
pyfunc = iter_next_usecase
cfunc = jit((types.UniTuple(types.int32, 3),), **flags)(pyfunc)
self.assertPreciseEqual(cfunc((1, 42, 5)), (1, 42))
cfunc = jit((types.UniTuple(types.int32, 1),), **flags)(pyfunc)
with self.assertRaises(StopIteration):
cfunc((1,))
def test_iter_next_npm(self):
self.test_iter_next(flags=no_pyobj_flags)
def test_locals(self, flags=forceobj_flags):
pyfunc = locals_usecase
with self.assertRaises(errors.ForbiddenConstruct):
jit((types.int64,), **flags)(pyfunc)
def test_locals_forceobj(self):
self.test_locals(flags=forceobj_flags)
def test_locals_npm(self):
with self.assertTypingError():
self.test_locals(flags=no_pyobj_flags)
def test_map(self, flags=forceobj_flags):
pyfunc = map_usecase
argtys = (types.Dummy('list'), types.Dummy('function_ptr'))
cfunc = jit(argtys, **flags)(pyfunc)
map_func = lambda x: x * 2
x = [0, 1, 2, 3, 4]
self.assertSequenceEqual(list(cfunc(x, map_func)),
list(pyfunc(x, map_func)))
def test_map_npm(self):
with self.assertTypingError():
self.test_map(flags=no_pyobj_flags)
#
# min() and max()
#
def check_minmax_1(self, pyfunc, flags):
cfunc = jit((types.int32, types.int32), **flags)(pyfunc)
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_max_1(self, flags=forceobj_flags):
"""
max(*args)
"""
self.check_minmax_1(max_usecase1, flags)
def test_min_1(self, flags=forceobj_flags):
"""
min(*args)
"""
self.check_minmax_1(min_usecase1, flags)
def test_max_npm_1(self):
self.test_max_1(flags=no_pyobj_flags)
def test_min_npm_1(self):
self.test_min_1(flags=no_pyobj_flags)
def check_minmax_2(self, pyfunc, flags):
cfunc = jit((types.int32, types.int32), **flags)(pyfunc)
x_operands = [-1, 0, 1]
y_operands = [-1, 0, 1]
for x, y in itertools.product(x_operands, y_operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_max_2(self, flags=forceobj_flags):
"""
max(list)
"""
self.check_minmax_2(max_usecase2, flags)
def test_min_2(self, flags=forceobj_flags):
"""
min(list)
"""
self.check_minmax_2(min_usecase2, flags)
def test_max_npm_2(self):
with self.assertTypingError():
self.test_max_2(flags=no_pyobj_flags)
def test_min_npm_2(self):
with self.assertTypingError():
self.test_min_2(flags=no_pyobj_flags)
def check_minmax_3(self, pyfunc, flags):
def check(argty):
cfunc = jit((argty,), **flags)(pyfunc)
# Check that the algorithm matches Python's with a non-total order
tup = (1.5, float('nan'), 2.5)
for val in [tup, tup[::-1]]:
self.assertPreciseEqual(cfunc(val), pyfunc(val))
check(types.UniTuple(types.float64, 3))
check(types.Tuple((types.float32, types.float64, types.float32)))
def test_max_3(self, flags=forceobj_flags):
"""
max(tuple)
"""
self.check_minmax_3(max_usecase3, flags)
def test_min_3(self, flags=forceobj_flags):
"""
min(tuple)
"""
self.check_minmax_3(min_usecase3, flags)
def test_max_npm_3(self):
self.test_max_3(flags=no_pyobj_flags)
def test_min_npm_3(self):
self.test_min_3(flags=no_pyobj_flags)
def check_min_max_invalid_types(self, pyfunc, flags=forceobj_flags):
cfunc = jit((types.int32, types.Dummy('list'),), **flags)(pyfunc)
cfunc(1, [1])
def test_max_1_invalid_types(self):
with self.assertRaises(TypeError):
self.check_min_max_invalid_types(max_usecase1)
def test_max_1_invalid_types_npm(self):
with self.assertTypingError():
self.check_min_max_invalid_types(max_usecase1, flags=no_pyobj_flags)
def test_min_1_invalid_types(self):
with self.assertRaises(TypeError):
self.check_min_max_invalid_types(min_usecase1)
def test_min_1_invalid_types_npm(self):
with self.assertTypingError():
self.check_min_max_invalid_types(min_usecase1, flags=no_pyobj_flags)
def check_minmax_bool1(self, pyfunc, flags):
cfunc = jit((types.bool_, types.bool_), **flags)(pyfunc)
operands = (False, True)
for x, y in itertools.product(operands, operands):
self.assertPreciseEqual(cfunc(x, y), pyfunc(x, y))
def test_max_bool1(self, flags=forceobj_flags):
# tests max(<booleans>)
self.check_minmax_bool1(max_usecase1, flags)
def test_min_bool1(self, flags=forceobj_flags):
# tests min(<booleans>)
self.check_minmax_bool1(min_usecase1, flags)
# Test that max(1) and min(1) fail
def check_min_max_unary_non_iterable(self, pyfunc, flags=forceobj_flags):
cfunc = jit((types.int32,), **flags)(pyfunc)
cfunc(1)
def test_max_unary_non_iterable(self):
with self.assertRaises(TypeError):
self.check_min_max_unary_non_iterable(max_usecase3)
def test_max_unary_non_iterable_npm(self):
with self.assertTypingError():
self.check_min_max_unary_non_iterable(max_usecase3)
def test_min_unary_non_iterable(self):
with self.assertRaises(TypeError):
self.check_min_max_unary_non_iterable(min_usecase3)
def test_min_unary_non_iterable_npm(self):
with self.assertTypingError():
self.check_min_max_unary_non_iterable(min_usecase3)
# Test that max(()) and min(()) fail
def check_min_max_empty_tuple(self, pyfunc, func_name):
with self.assertTypingError() as raises:
jit((), **no_pyobj_flags)(pyfunc)
self.assertIn("%s() argument is an empty tuple" % func_name,
str(raises.exception))
def test_max_empty_tuple(self):
self.check_min_max_empty_tuple(max_usecase4, "max")
def test_min_empty_tuple(self):
self.check_min_max_empty_tuple(min_usecase4, "min")
def test_oct(self, flags=forceobj_flags):
pyfunc = oct_usecase
cfunc = jit((types.int32,), **flags)(pyfunc)
for x in [-8, -1, 0, 1, 8]:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_oct_npm(self):
with self.assertTypingError():
self.test_oct(flags=no_pyobj_flags)
def test_reduce(self, flags=forceobj_flags):
pyfunc = reduce_usecase
argtys = (types.Dummy('function_ptr'), types.Dummy('list'))
cfunc = jit(argtys, **flags)(pyfunc)
reduce_func = lambda x, y: x + y
x = range(10)
self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
x = [x + x/10.0 for x in range(10)]
self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
x = [complex(x, x) for x in range(10)]
self.assertPreciseEqual(cfunc(reduce_func, x), pyfunc(reduce_func, x))
def test_reduce_npm(self):
with self.assertTypingError():
self.test_reduce(flags=no_pyobj_flags)
def test_round1(self, flags=forceobj_flags):
pyfunc = round_usecase1
for tp in (types.float64, types.float32):
cfunc = jit((tp,), **flags)(pyfunc)
values = [-1.6, -1.5, -1.4, -0.5, 0.0, 0.1, 0.5, 0.6, 1.4, 1.5, 5.0]
values += [-0.1, -0.0]
for x in values:
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_round1_npm(self):
self.test_round1(flags=no_pyobj_flags)
def test_round2(self, flags=forceobj_flags):
pyfunc = round_usecase2
for tp in (types.float64, types.float32):
prec = 'single' if tp is types.float32 else 'exact'
cfunc = jit((tp, types.int32), **flags)(pyfunc)
for x in [0.0, 0.1, 0.125, 0.25, 0.5, 0.75, 1.25,
1.5, 1.75, 2.25, 2.5, 2.75, 12.5, 15.0, 22.5]:
for n in (-1, 0, 1, 2):
self.assertPreciseEqual(cfunc(x, n), pyfunc(x, n),
prec=prec)
expected = pyfunc(-x, n)
self.assertPreciseEqual(cfunc(-x, n), pyfunc(-x, n),
prec=prec)
def test_round2_npm(self):
self.test_round2(flags=no_pyobj_flags)
def test_sum_objmode(self, flags=forceobj_flags):
pyfunc = sum_usecase
cfunc = jit((types.Dummy('list'),), **flags)(pyfunc)
x = range(10)
self.assertPreciseEqual(cfunc(x), pyfunc(x))
x = [x + x/10.0 for x in range(10)]
self.assertPreciseEqual(cfunc(x), pyfunc(x))
x = [complex(x, x) for x in range(10)]
self.assertPreciseEqual(cfunc(x), pyfunc(x))
def test_sum(self):
# In Python 3.8+ "start" can be specified as a kwarg, so test that too
sum_default = njit(sum_usecase)
sum_kwarg = njit(sum_kwarg_usecase)
@njit
def sum_range(sz, start=0):
tmp = range(sz)
ret = sum(tmp, start)
return sum(tmp, start=start), ret
ntpl = namedtuple('ntpl', ['a', 'b'])
# check call with default kwarg, start=0
def args():
yield [*range(10)]
yield [x + x/10.0 for x in range(10)]
yield [x * 1j for x in range(10)]
yield (1, 2, 3)
yield (1, 2, 3j)
# uints will likely end up as floats as `start` is signed, so just
# test mixed signed ints
yield (np.int64(32), np.int32(2), np.int8(3))
tl = typed.List(range(5))
yield tl
yield np.ones(5)
yield ntpl(100, 200)
yield ntpl(100, 200j)
for x in args():
self.assertPreciseEqual(sum_default(x), sum_default.py_func(x))
# Check the uint use case, as start is signed, NumPy will end up with
# a float result whereas Numba will end up with an int (see integer
# typing NBEP).
x = (np.uint64(32), np.uint32(2), np.uint8(3))
self.assertEqual(sum_default(x), sum_default.py_func(x))
# check call with changing default kwarg, start
def args_kws():
yield [*range(10)], 12
yield [x + x/10.0 for x in range(10)], 19j
yield [x * 1j for x in range(10)], -2
yield (1, 2, 3), 9
yield (1, 2, 3j), -0
# uints will likely end up as floats as `start` is signed, so just
# test mixed signed ints
yield (np.int64(32), np.int32(2), np.int8(3)), np.uint32(7)
tl = typed.List(range(5))
yield tl, 100
yield np.ones((5, 5)), 10 * np.ones((5,))
yield ntpl(100, 200), -50
yield ntpl(100, 200j), 9
for x, start in args_kws():
self.assertPreciseEqual(sum_kwarg(x, start=start),
sum_kwarg.py_func(x, start=start))
# check call with range()
for start in range(-3, 4):
for sz in range(-3, 4):
self.assertPreciseEqual(sum_range(sz, start=start),
sum_range.py_func(sz, start=start))
def test_sum_exceptions(self):
sum_default = njit(sum_usecase)
sum_kwarg = njit(sum_kwarg_usecase)
# check start as string/bytes/bytearray is error
msg = "sum() can't sum {}"
with self.assertRaises(errors.TypingError) as raises:
sum_kwarg((1, 2, 3), 'a')
self.assertIn(msg.format('strings'), str(raises.exception))
with self.assertRaises(errors.TypingError) as raises:
sum_kwarg((1, 2, 3), b'123')
self.assertIn(msg.format('bytes'), str(raises.exception))
with self.assertRaises(errors.TypingError) as raises:
sum_kwarg((1, 2, 3), bytearray(b'123'))
self.assertIn(msg.format('bytearray'), str(raises.exception))
# check invalid type has no impl
with self.assertRaises(errors.TypingError) as raises:
sum_default('abcd')
self.assertIn('No implementation', str(raises.exception))
def test_truth(self):
pyfunc = truth_usecase
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(True), cfunc(True))
self.assertEqual(pyfunc(False), cfunc(False))
def test_type_unary(self):
# Test type(val) and type(val)(other_val)
pyfunc = type_unary_usecase
cfunc = jit(nopython=True)(pyfunc)
def check(*args):
expected = pyfunc(*args)
self.assertPreciseEqual(cfunc(*args), expected)
check(1.5, 2)
check(1, 2.5)
check(1.5j, 2)
check(True, 2)
check(2.5j, False)
def test_zip(self, flags=forceobj_flags):
self.run_nullary_func(zip_usecase, flags)
def test_zip_npm(self):
self.test_zip(flags=no_pyobj_flags)
def test_zip_1(self, flags=forceobj_flags):
self.run_nullary_func(zip_1_usecase, flags)
def test_zip_1_npm(self):
self.test_zip_1(flags=no_pyobj_flags)
def test_zip_3(self, flags=forceobj_flags):
self.run_nullary_func(zip_3_usecase, flags)
def test_zip_3_npm(self):
self.test_zip_3(flags=no_pyobj_flags)
def test_zip_0(self, flags=forceobj_flags):
self.run_nullary_func(zip_0_usecase, flags)
def test_zip_0_npm(self):
self.test_zip_0(flags=no_pyobj_flags)
def test_zip_first_exhausted(self, flags=forceobj_flags):
"""
Test side effect to the input iterators when a left iterator has been
exhausted before the ones on the right.
"""
self.run_nullary_func(zip_first_exhausted, flags)
def test_zip_first_exhausted_npm(self):
self.test_zip_first_exhausted(flags=nrt_no_pyobj_flags)
def test_pow_op_usecase(self):
args = [
(2, 3),
(2.0, 3),
(2, 3.0),
(2j, 3.0j),
]
for x, y in args:
argtys = (typeof(x), typeof(y))
cfunc = jit(argtys, **no_pyobj_flags)(pow_op_usecase)
r = cfunc(x, y)
self.assertPreciseEqual(r, pow_op_usecase(x, y))
def test_pow_usecase(self):
args = [
(2, 3),
(2.0, 3),
(2, 3.0),
(2j, 3.0j),
]
for x, y in args:
argtys = (typeof(x), typeof(y))
cfunc = jit(argtys, **no_pyobj_flags)(pow_usecase)
r = cfunc(x, y)
self.assertPreciseEqual(r, pow_usecase(x, y))
def _check_min_max(self, pyfunc):
cfunc = njit()(pyfunc)
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(expected, got)
def test_min_max_iterable_input(self):
@njit
def frange(start, stop, step):
i = start
while i < stop:
yield i
i += step
def sample_functions(op):
yield lambda: op(range(10))
yield lambda: op(range(4, 12))
yield lambda: op(range(-4, -15, -1))
yield lambda: op([6.6, 5.5, 7.7])
yield lambda: op([(3, 4), (1, 2)])
yield lambda: op(frange(1.1, 3.3, 0.1))
yield lambda: op([np.nan, -np.inf, np.inf, np.nan])
yield lambda: op([(3,), (1,), (2,)])
for fn in sample_functions(op=min):
self._check_min_max(fn)
for fn in sample_functions(op=max):
self._check_min_max(fn)
| TestBuiltins |
python | mkdocs__mkdocs | mkdocs/structure/toc.py | {
"start": 754,
"end": 1606
} | class ____:
"""A single entry in the table of contents."""
def __init__(self, title: str, id: str, level: int) -> None:
self.title, self.id, self.level = title, id, level
self.children = []
title: str
"""The text of the item, as HTML."""
@property
def url(self) -> str:
"""The hash fragment of a URL pointing to the item."""
return '#' + self.id
level: int
"""The zero-based level of the item."""
children: list[AnchorLink]
"""An iterable of any child items."""
def __str__(self) -> str:
return self.indent_print()
def indent_print(self, depth: int = 0) -> str:
indent = ' ' * depth
ret = f'{indent}{self.title} - {self.url}\n'
for item in self.children:
ret += item.indent_print(depth + 1)
return ret
| AnchorLink |
python | tiangolo__fastapi | fastapi/security/http.py | {
"start": 10264,
"end": 13553
} | class ____(HTTPBase):
"""
HTTP Digest authentication.
**Warning**: this is only a stub to connect the components with OpenAPI in FastAPI,
but it doesn't implement the full Digest scheme, you would need to to subclass it
and implement it in your code.
Ref: https://datatracker.ietf.org/doc/html/rfc7616
## Usage
Create an instance object and use that object as the dependency in `Depends()`.
The dependency result will be an `HTTPAuthorizationCredentials` object containing
the `scheme` and the `credentials`.
## Example
```python
from typing import Annotated
from fastapi import Depends, FastAPI
from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest
app = FastAPI()
security = HTTPDigest()
@app.get("/users/me")
def read_current_user(
credentials: Annotated[HTTPAuthorizationCredentials, Depends(security)]
):
return {"scheme": credentials.scheme, "credentials": credentials.credentials}
```
"""
def __init__(
self,
*,
scheme_name: Annotated[
Optional[str],
Doc(
"""
Security scheme name.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
Security scheme description.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
auto_error: Annotated[
bool,
Doc(
"""
By default, if the HTTP Digest is not provided, `HTTPDigest` will
automatically cancel the request and send the client an error.
If `auto_error` is set to `False`, when the HTTP Digest is not
available, instead of erroring out, the dependency result will
be `None`.
This is useful when you want to have optional authentication.
It is also useful when you want to have authentication that can be
provided in one of multiple optional ways (for example, in HTTP
Digest or in a cookie).
"""
),
] = True,
):
self.model = HTTPBaseModel(scheme="digest", description=description)
self.scheme_name = scheme_name or self.__class__.__name__
self.auto_error = auto_error
async def __call__(
self, request: Request
) -> Optional[HTTPAuthorizationCredentials]:
authorization = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if not (authorization and scheme and credentials):
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
if scheme.lower() != "digest":
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials)
| HTTPDigest |
python | pypa__pip | src/pip/_vendor/cachecontrol/filewrapper.py | {
"start": 281,
"end": 4291
class ____:
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.
    All attributes are proxied to the underlying file object.
    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.
    The data is stored in a temporary file until it is all available. As long
    as the temporary files directory is disk-based (sometimes it's a
    memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory
    pressure is high. For small files the disk usually won't be used at all,
    it'll all be in the filesystem memory cache, so there should be no
    performance impact.
    """
    def __init__(
        self, fp: HTTPResponse, callback: Callable[[bytes], None] | None
    ) -> None:
        # Tee buffer for everything read from fp; delete=True means the
        # temp file is removed automatically when closed.
        self.__buf = NamedTemporaryFile("rb+", delete=True)
        self.__fp = fp
        self.__callback = callback
    def __getattr__(self, name: str) -> Any:
        # The vagaries of garbage collection means that self.__fp is
        # not always set. By using __getattribute__ and the private
        # name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stop things from
        # infinitely recursing calls to getattr in the case where
        # self.__fp hasn't been set.
        #
        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
        #
        # NOTE(review): the literal "_CallbackFileWrapper__fp" is the
        # name-mangled form of self.__fp and assumes the enclosing class
        # is named CallbackFileWrapper — confirm if the class is renamed.
        fp = self.__getattribute__("_CallbackFileWrapper__fp")
        return getattr(fp, name)
    def __is_fp_closed(self) -> bool:
        """Best-effort check of whether the wrapped response is exhausted."""
        try:
            return self.__fp.fp is None
        except AttributeError:
            pass
        try:
            closed: bool = self.__fp.closed
            return closed
        except AttributeError:
            pass
        # We just don't cache it then.
        # TODO: Add some logging here...
        return False
    def _close(self) -> None:
        """Hand the buffered bytes to the callback (at most once) and free the buffer."""
        if self.__callback:
            if self.__buf.tell() == 0:
                # Empty file:
                result = b""
            else:
                # Return the data without actually loading it into memory,
                # relying on Python's buffer API and mmap(). mmap() just gives
                # a view directly into the filesystem's memory cache, so it
                # doesn't result in duplicate memory use.
                self.__buf.seek(0, 0)
                result = memoryview(
                    mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
                )
            self.__callback(result)
        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter dead locks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do it's thing normally.
        self.__callback = None
        # Closing the temporary file releases memory and frees disk space.
        # Important when caching big files.
        self.__buf.close()
    def read(self, amt: int | None = None) -> bytes:
        """Read from the wrapped fp, teeing the bytes into the buffer."""
        data: bytes = self.__fp.read(amt)
        if data:
            # We may be dealing with b'', a sign that things are over:
            # it's passed e.g. after we've already closed self.__buf.
            self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()
        return data
    def _safe_read(self, amt: int) -> bytes:
        """Mirror of httplib's _safe_read; skips teeing the chunk-terminating CRLF."""
        data: bytes = self.__fp._safe_read(amt)  # type: ignore[attr-defined]
        if amt == 2 and data == b"\r\n":
            # urllib executes this read to toss the CRLF at the end
            # of the chunk.
            return data
        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()
        return data
| CallbackFileWrapper |
python | vyperlang__vyper | vyper/venom/analysis/mem_ssa.py | {
"start": 537,
"end": 1869
class ____:
    """Base class for nodes in the memory SSA graph."""
    def __init__(self, id: int):
        self.id = id
        self.reaching_def: Optional[MemoryAccess] = None
        self.loc: MemoryLocation = MemoryLocation.EMPTY
    @property
    def is_live_on_entry(self) -> bool:
        # Access id 0 is reserved for the synthetic "live on entry" node.
        return self.id == 0
    @property
    def inst(self) -> IRInstruction:
        raise NotImplementedError(f"{type(self)} does not have an inst!")
    @property
    def is_volatile(self) -> bool:
        """
        Whether this memory access is volatile.
        A volatile access means the underlying location can be read or
        written in ways the SSA analysis does not track, e.g. through
        other function calls or other side effects.
        """
        return self.loc.is_volatile
    @property
    def id_str(self) -> str:
        # Human-readable id used by __repr__ and debug output.
        return "live_on_entry" if self.is_live_on_entry else f"{self.id}"
    def __eq__(self, other: object) -> bool:
        # Identity in the SSA graph is determined solely by the access id.
        return isinstance(other, MemoryAccess) and self.id == other.id
    def __hash__(self) -> int:
        # Keep the raw id (not hash(id)) so hashing stays in lockstep
        # with the original implementation.
        return self.id
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.id_str})"
| MemoryAccess |
python | scrapy__scrapy | scrapy/commands/bench.py | {
"start": 914,
"end": 1372
class ____:
    """Context manager running scrapy's benchmark HTTP server in a subprocess."""
    def __enter__(self) -> None:
        cmd = [sys.executable, "-u", "-m", "scrapy.utils.benchserver"]
        self.proc = subprocess.Popen(  # noqa: S603
            cmd, stdout=subprocess.PIPE, env=get_testenv()
        )
        # Block until the server prints its startup line, i.e. it is ready.
        assert self.proc.stdout
        self.proc.stdout.readline()
    def __exit__(self, exc_type, exc_value, traceback) -> None:
        # Kill the server and give the OS a moment to release the port.
        self.proc.kill()
        self.proc.wait()
        time.sleep(0.2)
| _BenchServer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_merge_cells.py | {
"start": 393,
"end": 1939
class ____(unittest.TestCase):
    """
    Test the Worksheet _write_merge_cells() method.
    """
    def setUp(self):
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)
        self.worksheet.str_table = SharedStringTable()
    def _assert_merge_cells_xml(self, expected):
        # Render the <mergeCells> element into the captured filehandle
        # and compare it to the expected XML fragment.
        self.worksheet._write_merge_cells()
        got = self.fh.getvalue()
        self.assertEqual(expected, got)
    def test_write_merge_cells_1(self):
        """Merge a single range given as (row, col) indices."""
        cell_format = Format()
        self.worksheet.merge_range(2, 1, 2, 2, "Foo", cell_format)
        self._assert_merge_cells_xml(
            """<mergeCells count="1"><mergeCell ref="B3:C3"/></mergeCells>"""
        )
    def test_write_merge_cells_2(self):
        """Merge a single range given in A1 notation."""
        cell_format = Format()
        self.worksheet.merge_range("B3:C3", "Foo", cell_format)
        self._assert_merge_cells_xml(
            """<mergeCells count="1"><mergeCell ref="B3:C3"/></mergeCells>"""
        )
    def test_write_merge_cells_3(self):
        """Merge two ranges; both are emitted in insertion order."""
        cell_format = Format()
        self.worksheet.merge_range("B3:C3", "Foo", cell_format)
        self.worksheet.merge_range("A2:D2", "Foo", cell_format)
        self._assert_merge_cells_xml(
            """<mergeCells count="2"><mergeCell ref="B3:C3"/><mergeCell ref="A2:D2"/></mergeCells>"""
        )
| TestWriteMergeCells |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 7607,
"end": 7947
class ____(AbstractTemplate):
    def generic(self, args, kws):
        """Type a comparison between two datetime64 values.

        All datetime64 units are inter-comparable, so the only check is
        that both operands are NPDatetime; the result is boolean.
        """
        lhs, rhs = args
        if isinstance(lhs, types.NPDatetime) and isinstance(rhs, types.NPDatetime):
            return signature(types.boolean, lhs, rhs)
        return None
@infer_global(operator.eq)
| DatetimeCmpOp |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 45950,
"end": 46264
class ____(BaseModel, frozen=True):
    # Immutable (frozen) model describing one route served by the proxy:
    # its prefix, its protocol, and the list of backing targets.
    # NOTE(review): intentionally no class docstring — pydantic may surface
    # one in the generated JSON schema description.
    targets: List[Target] = Field(description="List of targets for the given route.")
    route_prefix: str = Field(description="Prefix route of the targets.")
    protocol: RequestProtocol = Field(description="Protocol of the targets.")
@PublicAPI(stability="stable")
| TargetGroup |
python | getsentry__sentry | src/sentry/incidents/metric_issue_detector.py | {
"start": 6908,
"end": 17484
class ____(BaseDetectorTypeValidator):
    """Validator for metric-issue detectors.

    Handles request validation, quota enforcement, Seer (anomaly detection)
    synchronization, and the snuba-query data-source lifecycle across
    create, update, and delete.
    """
    data_sources = serializers.ListField(
        child=SnubaQueryValidator(timeWindowSeconds=True), required=False
    )
    condition_group = MetricIssueConditionGroupValidator(required=True)
    def validate(self, attrs):
        """Run base validation, then cap the condition count at 3."""
        attrs = super().validate(attrs)
        if "condition_group" in attrs:
            conditions = attrs.get("condition_group", {}).get("conditions")
            if len(conditions) > 3:
                raise serializers.ValidationError("Too many conditions")
        return attrs
    def _validate_transaction_dataset_deprecation(self, dataset: Dataset) -> None:
        """Reject new transaction-based alerts for orgs on the deprecation flag."""
        organization = self.context.get("organization")
        if organization is None:
            raise serializers.ValidationError("Missing organization context")
        if features.has("organizations:discover-saved-queries-deprecation", organization):
            if dataset in [Dataset.PerformanceMetrics, Dataset.Transactions]:
                raise serializers.ValidationError(
                    "Creation of transaction-based alerts is disabled, as we migrate to the span dataset. Create span-based alerts (dataset: events_analytics_platform) with the is_transaction:true filter instead."
                )
    def _validate_extrapolation_mode(self, extrapolation_mode: ExtrapolationMode) -> None:
        """Disallow server-weighted extrapolation on newly created detectors."""
        if extrapolation_mode == ExtrapolationMode.SERVER_WEIGHTED:
            raise serializers.ValidationError(
                "server_weighted extrapolation mode is not supported for new detectors."
            )
    def get_quota(self) -> DetectorQuota:
        """Return the org's metric-detector quota and current usage.

        A limit of -1 (or the feature flag being off) means unlimited.
        """
        organization = self.context.get("organization")
        request = self.context.get("request")
        if organization is None or request is None:
            raise serializers.ValidationError("Missing organization/request context")
        detector_limit = quotas.backend.get_metric_detector_limit(organization.id)
        if (
            not features.has(
                "organizations:workflow-engine-metric-detector-limit",
                organization,
                actor=request.user,
            )
            or detector_limit == -1
        ):
            return DetectorQuota(has_exceeded=False, limit=-1, count=-1)
        detector_count = Detector.objects.filter(
            project__organization=organization,
            type="metric_issue",  # Avoided circular import. TODO: move magic strings to constant file
            status=ObjectStatus.ACTIVE,
        ).count()
        has_exceeded = detector_count >= detector_limit
        return DetectorQuota(has_exceeded=has_exceeded, limit=detector_limit, count=detector_count)
    def is_editing_transaction_dataset(
        self, snuba_query: SnubaQuery, data_source: SnubaQueryDataSourceType
    ) -> bool:
        """Return True if the update targets a transaction dataset AND changes any query field."""
        if data_source.get("dataset") in [Dataset.PerformanceMetrics, Dataset.Transactions] and (
            data_source.get("dataset", Dataset(snuba_query.dataset)) != Dataset(snuba_query.dataset)
            or data_source.get("query", snuba_query.query) != snuba_query.query
            or data_source.get("aggregate", snuba_query.aggregate) != snuba_query.aggregate
            or data_source.get("time_window", snuba_query.time_window) != snuba_query.time_window
            or data_source.get("event_types", snuba_query.event_types) != snuba_query.event_types
        ):
            return True
        return False
    def update_data_source(
        self, instance: Detector, data_source: SnubaQueryDataSourceType, seer_updated: bool = False
    ):
        """Apply data-source changes to the detector's underlying snuba query.

        Silently returns if the detector has no DataSource. For dynamic
        detectors, Seer is updated first (unless the caller already did via
        ``seer_updated``) so the snuba query is not changed on Seer failure.
        """
        try:
            source_instance = DataSource.objects.get(detector=instance)
        except DataSource.DoesNotExist:
            return
        if source_instance:
            try:
                query_subscription = QuerySubscription.objects.get(id=source_instance.source_id)
            except QuerySubscription.DoesNotExist:
                raise serializers.ValidationError("QuerySubscription not found, can't update")
        if query_subscription:
            try:
                snuba_query = SnubaQuery.objects.get(id=query_subscription.snuba_query.id)
            except SnubaQuery.DoesNotExist:
                raise serializers.ValidationError("SnubaQuery not found, can't update")
        event_types = SnubaQueryEventType.objects.filter(snuba_query_id=snuba_query.id)
        if self.is_editing_transaction_dataset(snuba_query, data_source):
            raise serializers.ValidationError(
                "Updates to transaction-based alerts is disabled, as we migrate to the span dataset. Create span-based alerts (dataset: events_analytics_platform) with the is_transaction:true filter instead."
            )
        old_extrapolation_mode = snuba_query.extrapolation_mode
        new_extrapolation_mode = data_source.get(
            "extrapolation_mode", snuba_query.extrapolation_mode
        )
        if data_source.get("dataset") == Dataset.EventsAnalyticsPlatform:
            if is_invalid_extrapolation_mode(old_extrapolation_mode, new_extrapolation_mode):
                raise serializers.ValidationError(
                    "Invalid extrapolation mode for this detector type."
                )
        # Handle a dynamic detector's snuba query changing
        if instance.config.get("detection_type") == AlertRuleDetectionType.DYNAMIC:
            try:
                validated_data_source: dict[str, Any] = {"data_sources": [data_source]}
                if not seer_updated:
                    update_detector_data(instance, validated_data_source)
            except Exception:
                # don't update the snuba query if we failed to send data to Seer
                raise serializers.ValidationError(
                    "Failed to send data to Seer, cannot update detector"
                )
        extrapolation_mode = format_extrapolation_mode(
            data_source.get("extrapolation_mode", snuba_query.extrapolation_mode)
        )
        update_snuba_query(
            snuba_query=snuba_query,
            query_type=data_source.get("query_type", snuba_query.type),
            dataset=data_source.get("dataset", snuba_query.dataset),
            query=data_source.get("query", snuba_query.query),
            aggregate=data_source.get("aggregate", snuba_query.aggregate),
            time_window=timedelta(seconds=data_source.get("time_window", snuba_query.time_window)),
            resolution=timedelta(seconds=data_source.get("resolution", snuba_query.resolution)),
            environment=data_source.get("environment", snuba_query.environment),
            event_types=data_source.get("event_types", [event_type for event_type in event_types]),
            extrapolation_mode=extrapolation_mode,
        )
    def update_anomaly_detection(self, instance: Detector, validated_data: dict[str, Any]) -> bool:
        """
        When data changes on a detector we may need to tell Seer to update or remove their data for the detector
        """
        seer_updated = False
        is_currently_dynamic_detector = (
            instance.config.get("detection_type") == AlertRuleDetectionType.DYNAMIC
        )
        is_update_dynamic_detector = (
            validated_data.get("config", {}).get("detection_type") == AlertRuleDetectionType.DYNAMIC
        )
        if not is_currently_dynamic_detector and is_update_dynamic_detector:
            # Detector has been changed to become a dynamic detector
            try:
                update_detector_data(instance, validated_data)
                seer_updated = True
            except Exception:
                # Don't update if we failed to send data to Seer
                raise serializers.ValidationError(
                    "Failed to send data to Seer, cannot update detector"
                )
        elif (
            validated_data.get("config")
            and is_currently_dynamic_detector
            and not is_update_dynamic_detector
        ):
            # Detector has been changed from a dynamic detector to another type
            delete_data_in_seer_for_detector(instance)
        return seer_updated
    def update(self, instance: Detector, validated_data: dict[str, Any]):
        """Update the detector; Seer sync runs first so a failure aborts the save."""
        # Handle anomaly detection changes first in case we need to exit before saving so that the instance values do not get updated
        seer_updated = self.update_anomaly_detection(instance, validated_data)
        super().update(instance, validated_data)
        # Handle enable/disable query subscriptions
        if "enabled" in validated_data:
            enabled = validated_data.get("enabled")
            assert isinstance(enabled, bool)
            query_subscriptions = QuerySubscription.objects.filter(
                id__in=[data_source.source_id for data_source in instance.data_sources.all()]
            )
            if query_subscriptions:
                enable_disable_subscriptions(query_subscriptions, enabled)
        # Handle data sources
        data_source: SnubaQueryDataSourceType | None = None
        if "data_sources" in validated_data:
            data_source = validated_data.pop("data_sources")[0]
        if data_source is not None:
            self.update_data_source(instance, data_source, seer_updated)
        instance.save()
        schedule_update_project_config(instance)
        return instance
    def create(self, validated_data: dict[str, Any]):
        """Create the detector after dataset/extrapolation checks; sync dynamic detectors to Seer."""
        if "data_sources" in validated_data:
            for validated_data_source in validated_data["data_sources"]:
                self._validate_transaction_dataset_deprecation(validated_data_source.get("dataset"))
                self._validate_extrapolation_mode(validated_data_source.get("extrapolation_mode"))
        detector = super().create(validated_data)
        if detector.config.get("detection_type") == AlertRuleDetectionType.DYNAMIC.value:
            try:
                send_new_detector_data(detector)
            except Exception:
                # Sending historical data failed; Detector won't be saved, but we
                # need to clean up database state that has already been created.
                detector.workflow_condition_group.delete()
                raise
        schedule_update_project_config(detector)
        return detector
    def delete(self):
        # Let Seer know we're deleting a dynamic detector so the data can be deleted there too
        assert self.instance is not None
        detector: Detector = self.instance
        delete_data_in_seer_for_detector(detector)
        super().delete()
| MetricIssueDetectorValidator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_autofilter06.py | {
"start": 315,
"end": 2622
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        self.set_filename("autofilter06.xlsx")
        self.set_text_file("autofilter_data.txt")
    def test_create_file(self):
        """
        Test the creation of a simple XlsxWriter file with an autofilter.
        This test corresponds to the following examples/autofilter.pl example:
        Example 6. Autofilter with filter for non-blanks.
        """
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # Set the autofilter.
        worksheet.autofilter("A1:D51")
        # Add filter criteria.
        worksheet.filter_column(0, "x == NonBlanks")
        # Open a text file with autofilter example data. Use a context
        # manager so the handle is released even if an assertion or
        # worksheet call below raises (the old explicit close() leaked
        # the handle on failure).
        with open(self.txt_filename) as textfile:
            # Read the headers from the first line of the input file.
            headers = textfile.readline().strip("\n").split()
            # Write out the headers.
            worksheet.write_row("A1", headers)
            # Start writing data after the headers.
            row = 1
            # Read the rest of the text file and write it to the worksheet.
            for line in textfile:
                # Split the input data based on whitespace.
                data = line.strip("\n").split()
                # Convert the number data from the text file.
                for i, item in enumerate(data):
                    try:
                        data[i] = float(item)
                    except ValueError:
                        pass
                # Simulate a blank cell in the data.
                if row == 6:
                    data[0] = ""
                # Get some of the field data.
                region = data[0]
                # Check for rows that match the filter.
                if region != "":
                    # Row matches the filter, no further action required.
                    pass
                else:
                    # We need to hide rows that don't match the filter.
                    worksheet.set_row(row, options={"hidden": True})
                # Write out the row data.
                worksheet.write_row(row, 0, data)
                # Move on to the next worksheet row.
                row += 1
        workbook.close()
        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 293729,
"end": 294199
class ____(sgqlc.types.Type):
    """
    See source code for more info.
    """
    # Auto-generated sgqlc binding for a GraphQL edge type: pairs a
    # pagination cursor with an Organization node and the member's role.
    # Field order in __field_names__ is part of the generated contract.
    __schema__ = graphql_schema
    __field_names__ = ("cursor", "node", "role")
    # Opaque pagination cursor (non-null in the schema).
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    # The organization at the end of the edge; nullable.
    node = sgqlc.types.Field("Organization", graphql_name="node")
    # The member's role within the enterprise organization (non-null).
    role = sgqlc.types.Field(
        sgqlc.types.non_null(EnterpriseUserAccountMembershipRole), graphql_name="role"
    )
python | conda__conda | tests/env/test_env.py | {
"start": 967,
"end": 13021
class ____:
    """In-memory sink that mimics a binary stream, capturing writes as text."""
    def __init__(self):
        # Everything written so far, decoded to str.
        self.output = ""
    def write(self, chunk):
        # Callers hand us UTF-8 encoded bytes; accumulate them as text.
        self.output += chunk.decode("utf-8")
def get_environment(filename):
    """Load an EnvironmentYaml from a file in the test support directory."""
    return from_file(support_file(filename))
def get_simple_environment():
    """Shorthand for the canonical simple.yml fixture."""
    return get_environment("simple.yml")
def get_valid_keys_environment():
    """Fixture containing every valid top-level environment key."""
    return get_environment("valid_keys.yml")
def get_invalid_keys_environment():
    """Fixture containing unknown top-level environment keys."""
    return get_environment("invalid_keys.yml")
def test_returns_Environment():
    e = get_simple_environment()
    assert isinstance(e, EnvironmentYaml)
def test_retains_full_filename():
    e = get_simple_environment()
    assert support_file("simple.yml") == e.filename
def test_with_pip():
    e = from_file(support_file("with-pip.yml"))
    assert "pip" in e.dependencies
    assert "foo" in e.dependencies["pip"]
    assert "baz" in e.dependencies["pip"]
@pytest.mark.timeout(20)
def test_add_pip():
    # Loading a file with pip deps should implicitly add "pip" to the
    # conda dependency list.
    e = from_file(support_file("add-pip.yml"))
    expected = {
        "conda": ["pip", "car"],
        "pip": ["foo", "baz"],
    }
    assert e.dependencies == expected
@pytest.mark.integration
def test_http():
    # A remote URL should parse identically to the local fixture.
    e = get_simple_environment()
    f = from_file(
        "https://raw.githubusercontent.com/conda/conda/main/tests/env/support/simple.yml"
    )
    assert e.dependencies == f.dependencies
    assert e.dependencies == f.dependencies
@pytest.mark.integration
def test_http_raises():
    with pytest.raises(CondaHTTPError):
        from_file(
            "https://raw.githubusercontent.com/conda/conda/main/tests/env/support/does-not-exist.yml"
        )
def test_envvars():
    # Environment variables referenced in channel URLs must be expanded.
    # Save and restore CONDA_TOKEN so the test doesn't leak state.
    current_conda_token = os.environ.get("CONDA_TOKEN")
    os.environ["CONDA_TOKEN"] = "aaa-12345"
    os.environ["OTHER_KEY"] = "12345-aaa"
    e = get_environment("channels_with_envvars.yml")
    assert set(e.channels) == {
        "https://localhost/t/aaa-12345/stable",
        "https://localhost/t/12345-aaa/stable",
        "conda-forge",
        "defaults",
    }
    if current_conda_token:
        os.environ["CONDA_TOKEN"] = current_conda_token
    else:
        del os.environ["CONDA_TOKEN"]
    del os.environ["OTHER_KEY"]
def test_has_empty_filename_by_default():
    e = EnvironmentYaml()
    assert e.filename is None
def test_has_filename_if_provided():
    r = random.randint(100, 200)
    random_filename = f"/path/to/random/environment-{r}.yml"
    e = EnvironmentYaml(filename=random_filename)
    assert e.filename == random_filename
def test_has_empty_name_by_default():
    e = EnvironmentYaml()
    assert e.name is None
def test_has_name_if_provided():
    random_name = f"random-{random.randint(100, 200)}"
    e = EnvironmentYaml(name=random_name)
    assert e.name == random_name
def test_dependencies_are_empty_by_default():
    e = EnvironmentYaml()
    assert not e.dependencies
def test_parses_dependencies_from_raw_file():
    e = get_simple_environment()
    expected = {"conda": ["nltk"]}
    assert e.dependencies == expected
def test_builds_spec_from_line_raw_dependency():
    # TODO Refactor this inside conda to not be a raw string
    e = EnvironmentYaml(dependencies=["nltk=3.0.0=np18py27_0"])
    expected = {"conda": ["nltk==3.0.0=np18py27_0"]}
    assert e.dependencies == expected
def test_args_are_wildcarded():
    e = EnvironmentYaml(dependencies=["python=2.7"])
    expected = {"conda": ["python=2.7"]}
    assert e.dependencies == expected
def test_other_tips_of_dependencies_are_supported():
    # A dict entry inside the dependency list declares pip dependencies.
    e = EnvironmentYaml(dependencies=["nltk", {"pip": ["foo", "bar"]}])
    expected = {
        "conda": ["nltk", "pip"],
        "pip": ["foo", "bar"],
    }
    assert e.dependencies == expected
def test_channels_default_to_empty_list():
    e = EnvironmentYaml()
    assert isinstance(e.channels, list)
    assert not e.channels
def test_add_channels():
    # Duplicates are collapsed while preserving first-seen order.
    e = EnvironmentYaml()
    e.add_channels(["dup", "dup", "unique"])
    assert e.channels == ["dup", "unique"]
def test_remove_channels():
    e = EnvironmentYaml(channels=["channel"])
    e.remove_channels()
    assert not e.channels
def test_channels_are_provided_by_kwarg():
    random_channels = (random.randint(100, 200), random)
    e = EnvironmentYaml(channels=random_channels)
    assert e.channels == random_channels
def test_to_dict_returns_dictionary_of_data():
    random_name = f"random{random.randint(100, 200)}"
    e = EnvironmentYaml(
        name=random_name, channels=["javascript"], dependencies=["nodejs"]
    )
    expected = {
        "name": random_name,
        "channels": ["javascript"],
        "dependencies": ["nodejs"],
    }
    assert e.to_dict() == expected
def test_to_dict_returns_just_name_if_only_thing_present():
    e = EnvironmentYaml(name="simple")
    expected = {"name": "simple"}
    assert e.to_dict() == expected
def test_to_yaml_returns_yaml_parseable_string():
    # Round-trip: the YAML dump should parse back to the same mapping.
    random_name = f"random{random.randint(100, 200)}"
    e = EnvironmentYaml(
        name=random_name, channels=["javascript"], dependencies=["nodejs"]
    )
    expected = {
        "name": random_name,
        "channels": ["javascript"],
        "dependencies": ["nodejs"],
    }
    actual = yaml_round_trip_load(StringIO(e.to_yaml()))
    assert expected == actual
def test_to_yaml_returns_proper_yaml():
    # Byte-exact check of the YAML layout (key order, indentation).
    random_name = f"random{random.randint(100, 200)}"
    e = EnvironmentYaml(
        name=random_name, channels=["javascript"], dependencies=["nodejs"]
    )
    expected = "\n".join(
        [
            f"name: {random_name}",
            "channels:",
            "  - javascript",
            "dependencies:",
            "  - nodejs",
            "",
        ]
    )
    actual = e.to_yaml()
    assert expected == actual
def test_to_yaml_takes_stream():
    # to_yaml(stream=...) writes to the stream instead of returning a str.
    random_name = f"random{random.randint(100, 200)}"
    e = EnvironmentYaml(
        name=random_name, channels=["javascript"], dependencies=["nodejs"]
    )
    s = FakeStream()
    e.to_yaml(stream=s)
    expected = "\n".join(
        [
            f"name: {random_name}",
            "channels:",
            "  - javascript",
            "dependencies:",
            "  - nodejs",
            "",
        ]
    )
    assert expected == s.output
def test_can_add_dependencies_to_environment():
    e = get_simple_environment()
    e.dependencies.add("bar")
    s = FakeStream()
    e.to_yaml(stream=s)
    expected = "\n".join(["name: nlp", "dependencies:", "  - nltk", "  - bar", ""])
    assert expected == s.output
def test_dependencies_update_after_adding():
    e = get_simple_environment()
    assert "bar" not in e.dependencies["conda"]
    e.dependencies.add("bar")
    assert "bar" in e.dependencies["conda"]
def test_valid_keys():
    e = get_valid_keys_environment()
    e_dict = e.to_dict()
    for key in VALID_KEYS:
        assert key in e_dict
def test_invalid_keys():
    # Unknown keys are dropped with a deprecation warning (CEP 24).
    with pytest.warns(
        PendingDeprecationWarning,
        match="The environment file is not fully CEP 24 compliant",
    ):
        e = get_invalid_keys_environment()
        e_dict = e.to_dict()
        assert "name" in e_dict
        assert len(e_dict) == 1
def test_empty_deps():
    e = get_environment("empty_deps.yml")
    e_dict = e.to_dict()
    assert "name" in e_dict
    assert "channels" in e_dict
    assert len(e_dict) == 2
def test_creates_file_on_save(tmp_path: Path):
    tmp = tmp_path / "environment.yml"
    assert not tmp.exists()
    env = EnvironmentYaml(filename=tmp, name="simple")
    env.save()
    assert tmp.exists()
    assert env.to_yaml() == tmp.read_text()
@pytest.mark.integration
def test_env_advanced_pip(
    monkeypatch: MonkeyPatch,
    conda_cli: CondaCLIFixture,
    path_factory: PathFactoryFixture,
    support_file_isolated,
):
    """End-to-end: `conda env create` installs pip deps from the YAML file."""
    monkeypatch.setenv("CONDA_DLL_SEARCH_MODIFICATION_ENABLE", "true")
    prefix = path_factory()
    assert not prefix.exists()
    pip_argh = support_file_isolated("pip_argh.yml")
    conda_cli(
        *("env", "create"),
        *("--prefix", prefix),
        *("--file", str(pip_argh)),
    )
    assert prefix.exists()
    PrefixData._cache_.clear()
    assert package_is_installed(prefix, "argh==0.26.2")
def test_from_history():
    # We're not testing that get_requested_specs_map() actually works
    # assume it gives us back a dict of MatchSpecs
    with patch("conda.history.History.get_requested_specs_map") as m:
        m.return_value = {
            "python": MatchSpec("python=3"),
            "pytest": MatchSpec("pytest!=3.7.3"),
            "mock": MatchSpec("mock"),
            "yaml": MatchSpec("yaml>=0.1"),
        }
        out = from_environment("mock_env", "mock_prefix", from_history=True)
        assert "yaml[version='>=0.1']" in out.to_dict()["dependencies"]
        assert "pytest!=3.7.3" in out.to_dict()["dependencies"]
        assert len(out.to_dict()["dependencies"]) == 4
        m.assert_called()
def test_environment_deprecated() -> None:
    """The legacy Environment class should emit a DeprecationWarning."""
    with pytest.deprecated_call():
        Environment(filename="idontexist", name="simple")
@pytest.mark.parametrize(
    "dependencies",
    (
        ["python"],
        [],
        ["python", "numpy"],
        ["python", "pip", {"pip": ["scipy"]}],
        [{"something-unknown": "idontknow"}],
    ),
)
def test_dependency_validation(dependencies):
    # All of these shapes are accepted without raising.
    dependencies_validation(dependencies)
@pytest.mark.parametrize(
    "dependencies,error_type,error_message",
    (
        (
            None,
            EnvironmentFileInvalid,
            "Invalid type for 'dependencies', expected a list",
        ),
        (
            ["nota~matchspec", "also!!not"],
            CondaMultiError,
            "Invalid spec 'nota~matchspec'",
        ),
        (["nota~matchspec", "also!!not"], CondaMultiError, "Invalid spec 'also!!not'"),
        (
            ["python", ["this-should", "not-be-a", "list"]],
            CondaMultiError,
            "is an invalid type",
        ),
        ({"wrong": "type"}, EnvironmentFileInvalid, "Invalid type for 'dependencies'"),
    ),
)
def test_dependency_validation_errors(dependencies, error_type, error_message):
    with pytest.raises(error_type, match=error_message):
        dependencies_validation(dependencies)
@pytest.mark.parametrize(
    "channels,error_message",
    (
        ({"wrong": "type"}, "Invalid type for 'channels'"),
        ([{"wrong": "type"}], "`channels` key must only contain strings."),
        ([1], "`channels` key must only contain strings."),
        (["one", "two"], None),
    ),
)
def test_channels_validation(channels, error_message):
    # error_message=None marks the accepted (non-raising) cases.
    if error_message:
        with pytest.raises(EnvironmentFileInvalid, match=error_message):
            channels_validation(channels)
    else:
        channels_validation(channels)
@pytest.mark.parametrize(
    "variables,error_message",
    (
        (["wrong", "type"], "Invalid type for 'variables'"),
        ({"name": "a"}, None),
        ({"name": 1, "word": True, "list": ["1", "2"]}, None),
    ),
)
def test_variables_validation(variables, error_message):
    if error_message:
        with pytest.raises(EnvironmentFileInvalid, match=error_message):
            variables_validation(variables)
    else:
        variables_validation(variables)
@pytest.mark.parametrize(
    "name,error_message",
    (
        (["wrong", "type"], "Invalid type for 'name'"),
        (1, "Invalid type for 'name'"),
        ("name", None),
    ),
)
def test_name_validation(name, error_message):
    if error_message:
        with pytest.raises(EnvironmentFileInvalid, match=error_message):
            name_validation(name)
    else:
        name_validation(name)
@pytest.mark.parametrize(
    "prefix,error_message",
    (
        (["wrong", "type"], "Invalid type for 'prefix'"),
        (1, "Invalid type for 'prefix'"),
        ("path/to/prefix", None),
    ),
)
def test_prefix_validation(prefix, error_message):
    if error_message:
        with pytest.raises(EnvironmentFileInvalid, match=error_message):
            prefix_validation(prefix)
    else:
        prefix_validation(prefix)
| FakeStream |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/utils.py | {
"start": 685,
"end": 5268
class ____(ValueError):
    """
    An invalid sdist filename was found, users should refer to the packaging user guide.
    """
# Core metadata spec for `Name`
_validate_regex = re.compile(
    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
)
_canonicalize_regex = re.compile(r"[-_.]+")
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
    """Return the PEP 503 normalized form of *name* (lowercase, runs of
    ``-_.`` collapsed to a single ``-``). Raises InvalidName when
    *validate* is True and the name fails the core-metadata regex."""
    if validate and not _validate_regex.match(name):
        raise InvalidName(f"name is invalid: {name!r}")
    # This is taken from PEP 503.
    value = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, value)
def is_normalized_name(name: str) -> bool:
    """Return True if *name* is already in PEP 503 normalized form."""
    return _normalized_regex.match(name) is not None
def canonicalize_version(
    version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version
    parts = []
    # Epoch
    if parsed.epoch != 0:
        parts.append(f"{parsed.epoch}!")
    # Release segment
    release_segment = ".".join(str(x) for x in parsed.release)
    if strip_trailing_zero:
        # NB: This strips trailing '.0's to normalize
        release_segment = re.sub(r"(\.0)+$", "", release_segment)
    parts.append(release_segment)
    # Pre-release
    if parsed.pre is not None:
        parts.append("".join(str(x) for x in parsed.pre))
    # Post-release
    if parsed.post is not None:
        parts.append(f".post{parsed.post}")
    # Development release
    if parsed.dev is not None:
        parts.append(f".dev{parsed.dev}")
    # Local version segment
    if parsed.local is not None:
        parts.append(f"+{parsed.local}")
    return "".join(parts)
def parse_wheel_filename(
    filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
    """Split a wheel filename into (name, version, build tag, tags).

    Error messages include the offending filename — the previous literal
    "(unknown)" placeholders made failures undiagnosable.
    """
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )
    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )
    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name.
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)
    try:
        version = Version(parts[1])
    except InvalidVersion as e:
        raise InvalidWheelFilename(
            f"Invalid wheel filename (invalid version): {filename}"
        ) from e
    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
    """Split an sdist filename into (normalized name, version)."""
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )
    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
    name = canonicalize_name(name_part)
    try:
        version = Version(version_part)
    except InvalidVersion as e:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (invalid version): {filename}"
        ) from e
    return (name, version)
| InvalidSdistFilename |
python | MongoEngine__mongoengine | tests/fields/test_dict_field.py | {
"start": 255,
"end": 13455
} | class ____(MongoDBTestCase):
def test_storage(self):
class BlogPost(Document):
info = DictField()
BlogPost.drop_collection()
info = {"testkey": "testvalue"}
post = BlogPost(info=info).save()
assert get_as_pymongo(post) == {"_id": post.id, "info": info}
def test_validate_invalid_type(self):
class BlogPost(Document):
info = DictField()
BlogPost.drop_collection()
invalid_infos = ["my post", ["test", "test"], {1: "test"}]
for invalid_info in invalid_infos:
with pytest.raises(ValidationError):
BlogPost(info=invalid_info).validate()
def test_keys_with_dots_or_dollars(self):
class BlogPost(Document):
info = DictField()
BlogPost.drop_collection()
post = BlogPost()
post.info = {"$title": "test"}
with pytest.raises(ValidationError):
post.validate()
post.info = {"nested": {"$title": "test"}}
with pytest.raises(ValidationError):
post.validate()
post.info = {"$title.test": "test"}
with pytest.raises(ValidationError):
post.validate()
post.info = {"nested": {"the.title": "test"}}
if get_mongodb_version() < MONGODB_36:
# MongoDB < 3.6 rejects dots
# To avoid checking the mongodb version from the DictField class
# we rely on MongoDB to reject the data during the save
post.validate()
with pytest.raises(InvalidDocument):
post.save()
else:
post.validate()
post.info = {"dollar_and_dot": {"te$st.test": "test"}}
if get_mongodb_version() < MONGODB_36:
post.validate()
with pytest.raises(InvalidDocument):
post.save()
else:
post.validate()
def test_general_things(self):
"""Ensure that dict types work as expected."""
class BlogPost(Document):
info = DictField()
BlogPost.drop_collection()
post = BlogPost(info={"title": "test"})
post.save()
post = BlogPost()
post.info = {"title": "dollar_sign", "details": {"te$t": "test"}}
post.save()
post = BlogPost()
post.info = {"details": {"test": "test"}}
post.save()
post = BlogPost()
post.info = {"details": {"test": 3}}
post.save()
assert BlogPost.objects.count() == 4
assert BlogPost.objects.filter(info__title__exact="test").count() == 1
assert BlogPost.objects.filter(info__details__test__exact="test").count() == 1
post = BlogPost.objects.filter(info__title__exact="dollar_sign").first()
assert "te$t" in post["info"]["details"]
# Confirm handles non strings or non existing keys
assert BlogPost.objects.filter(info__details__test__exact=5).count() == 0
assert BlogPost.objects.filter(info__made_up__test__exact="test").count() == 0
post = BlogPost.objects.create(info={"title": "original"})
post.info.update({"title": "updated"})
post.save()
post.reload()
assert "updated" == post.info["title"]
post.info.setdefault("authors", [])
post.save()
post.reload()
assert post.info["authors"] == []
def test_dictfield_dump_document_with_inheritance__cls(self):
"""Ensure a DictField can handle another document's dump."""
class Doc(Document):
field = DictField()
class ToEmbedParent(Document):
id = IntField(primary_key=True)
recursive = DictField()
meta = {"allow_inheritance": True}
class ToEmbedChild(ToEmbedParent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Doc.drop_collection()
ToEmbedParent.drop_collection()
# with a Document with a _cls field
to_embed_recursive = ToEmbedChild(id=1).save()
to_embed_child = ToEmbedChild(
id=2, recursive=to_embed_recursive.to_mongo().to_dict()
).save()
doc_dump_as_dict = to_embed_child.to_mongo().to_dict()
doc = Doc(field=doc_dump_as_dict)
assert Doc.field._auto_dereference is False
assert isinstance(doc.field, dict) # depends on auto_dereference
doc.save()
assert isinstance(doc.field, dict)
expected = {
"_id": 2,
"_cls": "ToEmbedParent.ToEmbedChild",
"recursive": {
"_id": 1,
"_cls": "ToEmbedParent.ToEmbedChild",
"recursive": {},
},
}
assert doc.field == expected
# _ = Doc.objects.first()
# assert Doc.field._auto_dereference is False # Fails, bug #2831
# doc = Doc(field=doc_dump_as_dict)
# assert isinstance(doc.field, dict) # Fails, bug #2831
def test_dictfield_dump_document_no_inheritance(self):
"""Ensure a DictField can handle another document's dump."""
class Doc(Document):
field = DictField()
class ToEmbed(Document):
id = IntField(primary_key=True)
recursive = DictField()
to_embed_recursive = ToEmbed(id=1).save()
to_embed = ToEmbed(
id=2, recursive=to_embed_recursive.to_mongo().to_dict()
).save()
doc = Doc(field=to_embed.to_mongo().to_dict())
doc.save()
assert isinstance(doc.field, dict)
assert doc.field == {"_id": 2, "recursive": {"_id": 1, "recursive": {}}}
def test_dictfield_strict(self):
"""Ensure that dict field handles validation if provided a strict field type."""
class Simple(Document):
mapping = DictField(field=IntField())
Simple.drop_collection()
e = Simple()
e.mapping["someint"] = 1
e.save()
# try creating an invalid mapping
with pytest.raises(ValidationError):
e.mapping["somestring"] = "abc"
e.save()
def test_dictfield_complex(self):
"""Ensure that the dict field can handle the complex types."""
class SettingBase(EmbeddedDocument):
meta = {"allow_inheritance": True}
class StringSetting(SettingBase):
value = StringField()
class IntegerSetting(SettingBase):
value = IntField()
class Simple(Document):
mapping = DictField()
Simple.drop_collection()
e = Simple()
e.mapping["somestring"] = StringSetting(value="foo")
e.mapping["someint"] = IntegerSetting(value=42)
e.mapping["nested_dict"] = {
"number": 1,
"string": "Hi!",
"float": 1.001,
"complex": IntegerSetting(value=42),
"list": [IntegerSetting(value=42), StringSetting(value="foo")],
}
e.save()
e2 = Simple.objects.get(id=e.id)
assert isinstance(e2.mapping["somestring"], StringSetting)
assert isinstance(e2.mapping["someint"], IntegerSetting)
# Test querying
assert Simple.objects.filter(mapping__someint__value=42).count() == 1
assert Simple.objects.filter(mapping__nested_dict__number=1).count() == 1
assert (
Simple.objects.filter(mapping__nested_dict__complex__value=42).count() == 1
)
assert (
Simple.objects.filter(mapping__nested_dict__list__0__value=42).count() == 1
)
assert (
Simple.objects.filter(mapping__nested_dict__list__1__value="foo").count()
== 1
)
# Confirm can update
Simple.objects().update(set__mapping={"someint": IntegerSetting(value=10)})
Simple.objects().update(
set__mapping__nested_dict__list__1=StringSetting(value="Boo")
)
assert (
Simple.objects.filter(mapping__nested_dict__list__1__value="foo").count()
== 0
)
assert (
Simple.objects.filter(mapping__nested_dict__list__1__value="Boo").count()
== 1
)
def test_push_dict(self):
class MyModel(Document):
events = ListField(DictField())
doc = MyModel(events=[{"a": 1}]).save()
raw_doc = get_as_pymongo(doc)
expected_raw_doc = {"_id": doc.id, "events": [{"a": 1}]}
assert raw_doc == expected_raw_doc
MyModel.objects(id=doc.id).update(push__events={})
raw_doc = get_as_pymongo(doc)
expected_raw_doc = {"_id": doc.id, "events": [{"a": 1}, {}]}
assert raw_doc == expected_raw_doc
def test_ensure_unique_default_instances(self):
"""Ensure that every field has it's own unique default instance."""
class D(Document):
data = DictField()
data2 = DictField(default=lambda: {})
d1 = D()
d1.data["foo"] = "bar"
d1.data2["foo"] = "bar"
d2 = D()
assert d2.data == {}
assert d2.data2 == {}
def test_dict_field_invalid_dict_value(self):
class DictFieldTest(Document):
dictionary = DictField(required=True)
DictFieldTest.drop_collection()
test = DictFieldTest(dictionary=None)
test.dictionary # Just access to test getter
with pytest.raises(ValidationError):
test.validate()
test = DictFieldTest(dictionary=False)
test.dictionary # Just access to test getter
with pytest.raises(ValidationError):
test.validate()
def test_dict_field_raises_validation_error_if_wrongly_assign_embedded_doc(self):
class DictFieldTest(Document):
dictionary = DictField(required=True)
DictFieldTest.drop_collection()
class Embedded(EmbeddedDocument):
name = StringField()
embed = Embedded(name="garbage")
doc = DictFieldTest(dictionary=embed)
with pytest.raises(ValidationError) as exc_info:
doc.validate()
error_msg = str(exc_info.value)
assert "'dictionary'" in error_msg
assert "Only dictionaries may be used in a DictField" in error_msg
def test_atomic_update_dict_field(self):
"""Ensure that the entire DictField can be atomically updated."""
class Simple(Document):
mapping = DictField(field=ListField(IntField(required=True)))
Simple.drop_collection()
e = Simple()
e.mapping["someints"] = [1, 2]
e.save()
e.update(set__mapping={"ints": [3, 4]})
e.reload()
assert isinstance(e.mapping, BaseDict)
assert {"ints": [3, 4]} == e.mapping
# try creating an invalid mapping
with pytest.raises(ValueError):
e.update(set__mapping={"somestrings": ["foo", "bar"]})
def test_dictfield_with_referencefield_complex_nesting_cases(self):
"""Ensure complex nesting inside DictField handles dereferencing of ReferenceField(dbref=True | False)"""
# Relates to Issue #1453
class Doc(Document):
s = StringField()
class Simple(Document):
mapping0 = DictField(ReferenceField(Doc, dbref=True))
mapping1 = DictField(ReferenceField(Doc, dbref=False))
mapping2 = DictField(ListField(ReferenceField(Doc, dbref=True)))
mapping3 = DictField(ListField(ReferenceField(Doc, dbref=False)))
mapping4 = DictField(DictField(field=ReferenceField(Doc, dbref=True)))
mapping5 = DictField(DictField(field=ReferenceField(Doc, dbref=False)))
mapping6 = DictField(ListField(DictField(ReferenceField(Doc, dbref=True))))
mapping7 = DictField(ListField(DictField(ReferenceField(Doc, dbref=False))))
mapping8 = DictField(
ListField(DictField(ListField(ReferenceField(Doc, dbref=True))))
)
mapping9 = DictField(
ListField(DictField(ListField(ReferenceField(Doc, dbref=False))))
)
Doc.drop_collection()
Simple.drop_collection()
d = Doc(s="aa").save()
e = Simple()
e.mapping0["someint"] = e.mapping1["someint"] = d
e.mapping2["someint"] = e.mapping3["someint"] = [d]
e.mapping4["someint"] = e.mapping5["someint"] = {"d": d}
e.mapping6["someint"] = e.mapping7["someint"] = [{"d": d}]
e.mapping8["someint"] = e.mapping9["someint"] = [{"d": [d]}]
e.save()
s = Simple.objects.first()
assert isinstance(s.mapping0["someint"], Doc)
assert isinstance(s.mapping1["someint"], Doc)
assert isinstance(s.mapping2["someint"][0], Doc)
assert isinstance(s.mapping3["someint"][0], Doc)
assert isinstance(s.mapping4["someint"]["d"], Doc)
assert isinstance(s.mapping5["someint"]["d"], Doc)
assert isinstance(s.mapping6["someint"][0]["d"], Doc)
assert isinstance(s.mapping7["someint"][0]["d"], Doc)
assert isinstance(s.mapping8["someint"][0]["d"][0], Doc)
assert isinstance(s.mapping9["someint"][0]["d"][0], Doc)
| TestDictField |
python | huggingface__transformers | src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py | {
"start": 12942,
"end": 14966
} | class ____(ASTPreTrainedModel):
def __init__(self, config: ASTConfig) -> None:
super().__init__(config)
self.config = config
self.embeddings = ASTEmbeddings(config)
self.encoder = ASTEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> ASTPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs()
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a
`torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library
(`pip install soundfile`).
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the
mel features, padding and conversion into a tensor of type `torch.FloatTensor`.
See [`~ASTFeatureExtractor.__call__`]
"""
if input_values is None:
raise ValueError("You have to specify input_values")
embedding_output = self.embeddings(input_values)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
| ASTModel |
python | walkccc__LeetCode | solutions/137. Single Number II/137-2.py | {
"start": 0,
"end": 187
} | class ____:
def singleNumber(self, nums: list[int]) -> int:
ones = 0
twos = 0
for num in nums:
ones ^= (num & ~twos)
twos ^= (num & ~ones)
return ones
| Solution |
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_assignment_operation.py | {
"start": 645,
"end": 1763
} | class ____:
def __setitem__(self, key, value):
return key + value
NonSubscriptable()[0] = 24 # [unsupported-assignment-operation]
NonSubscriptable[0] = 24 # [unsupported-assignment-operation]
Subscriptable()[0] = 24
Subscriptable[0] = 24 # [unsupported-assignment-operation]
# generators are not subscriptable
def powers_of_two():
k = 0
while k < 10:
yield 2 ** k
k += 1
powers_of_two()[0] = 42 # [unsupported-assignment-operation]
powers_of_two[0] = 42 # [unsupported-assignment-operation]
# check that primitive non subscriptable types are caught
True[0] = 24 # [unsupported-assignment-operation]
None[0] = 42 # [unsupported-assignment-operation]
8.5[0] = 24 # [unsupported-assignment-operation]
10[0] = 24 # [unsupported-assignment-operation]
# sets are not subscriptable
{x ** 2 for x in range(10)}[0] = 24 # [unsupported-assignment-operation]
set(numbers)[0] = 24 # [unsupported-assignment-operation]
frozenset(numbers)[0] = 42 # [unsupported-assignment-operation]
# skip instances with unknown base classes
from some_missing_module import LibSubscriptable
| Subscriptable |
python | ijl__orjson | test/test_enum.py | {
"start": 157,
"end": 202
} | class ____(int, enum.Enum):
ONE = 1
| IntEnum |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.