language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | anthropics__anthropic-sdk-python | tests/test_response.py | {
"start": 8305,
"end": 9592
} | class ____(BaseModel):
a: str
@pytest.mark.parametrize("client", [False], indirect=True) # loose validation
def test_response_parse_expect_model_union_non_json_content(client: Anthropic) -> None:
response = APIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
assert isinstance(obj, str)
assert obj == "foo"
@pytest.mark.asyncio
@pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation
async def test_async_response_parse_expect_model_union_non_json_content(async_client: AsyncAnthropic) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = await response.parse(to=cast(Any, Union[CustomModel, OtherModel]))
assert isinstance(obj, str)
assert obj == "foo"
| OtherModel |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 109850,
"end": 111050
} | class ____(Request):
"""
Get an attachment containing the frames returned by the dataview specified in `prepare_download_for_dataview`
:param prepare_id: Call ID returned by a call to prepare_download_for_dataview
:type prepare_id: str
"""
_service = "frames"
_action = "download_for_dataview"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"prepare_id": {
"description": "Call ID returned by a call to prepare_download_for_dataview",
"type": "string",
}
},
"required": ["prepare_id"],
"type": "object",
}
def __init__(self, prepare_id, **kwargs):
super(DownloadForDataviewRequest, self).__init__(**kwargs)
self.prepare_id = prepare_id
@schema_property("prepare_id")
def prepare_id(self):
return self._property_prepare_id
@prepare_id.setter
def prepare_id(self, value):
if value is None:
self._property_prepare_id = None
return
self.assert_isinstance(value, "prepare_id", six.string_types)
self._property_prepare_id = value
| DownloadForDataviewRequest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataflow.py | {
"start": 11342,
"end": 17794
} | class ____(BaseTrigger):
"""
Dataflow trigger that checks the state of a Dataflow YAML job.
:param job_id: Required. ID of the job.
:param project_id: Required. The Google Cloud project ID in which the job was started.
:param location: The location where job is executed. If set to None then
the value of DEFAULT_DATAFLOW_LOCATION will be used.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param poll_sleep: Optional. The time in seconds to sleep between polling Google Cloud Platform
for the Dataflow job.
:param cancel_timeout: Optional. How long (in seconds) operator should wait for the pipeline to be
successfully cancelled when task is being killed.
:param expected_terminal_state: Optional. The expected terminal state of the Dataflow job at which the
operator task is set to succeed. Defaults to 'JOB_STATE_DONE' for the batch jobs and
'JOB_STATE_RUNNING' for the streaming jobs.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
def __init__(
self,
job_id: str,
project_id: str | None,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
poll_sleep: int = 10,
cancel_timeout: int | None = 5 * 60,
expected_terminal_state: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
):
super().__init__()
self.project_id = project_id
self.job_id = job_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.poll_sleep = poll_sleep
self.cancel_timeout = cancel_timeout
self.expected_terminal_state = expected_terminal_state
self.impersonation_chain = impersonation_chain
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize class arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataflow.DataflowStartYamlJobTrigger",
{
"project_id": self.project_id,
"job_id": self.job_id,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"poll_sleep": self.poll_sleep,
"expected_terminal_state": self.expected_terminal_state,
"impersonation_chain": self.impersonation_chain,
"cancel_timeout": self.cancel_timeout,
},
)
async def run(self):
"""
Fetch job and yield events depending on the job's type and state.
Yield TriggerEvent if the job reaches a terminal state.
Otherwise awaits for a specified amount of time stored in self.poll_sleep variable.
"""
hook: AsyncDataflowHook = self._get_async_hook()
try:
while True:
job: Job = await hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_state = job.current_state
job_type = job.type_
if job_state.name == self.expected_terminal_state:
yield TriggerEvent(
{
"job": Job.to_dict(job),
"status": "success",
"message": f"Job reached the expected terminal state: {self.expected_terminal_state}.",
}
)
return
elif job_type == JobType.JOB_TYPE_STREAMING and job_state == JobState.JOB_STATE_RUNNING:
yield TriggerEvent(
{
"job": Job.to_dict(job),
"status": "success",
"message": "Streaming job reached the RUNNING state.",
}
)
return
elif job_type == JobType.JOB_TYPE_BATCH and job_state == JobState.JOB_STATE_DONE:
yield TriggerEvent(
{
"job": Job.to_dict(job),
"status": "success",
"message": "Batch job completed.",
}
)
return
elif job_state == JobState.JOB_STATE_FAILED:
yield TriggerEvent(
{
"job": Job.to_dict(job),
"status": "error",
"message": "Job failed.",
}
)
return
elif job_state == JobState.JOB_STATE_STOPPED:
yield TriggerEvent(
{
"job": Job.to_dict(job),
"status": "stopped",
"message": "Job was stopped.",
}
)
return
else:
self.log.info("Current job status is: %s", job_state.name)
self.log.info("Sleeping for %s seconds.", self.poll_sleep)
await asyncio.sleep(self.poll_sleep)
except Exception as e:
self.log.exception("Exception occurred while checking for job completion.")
yield TriggerEvent({"job": None, "status": "error", "message": str(e)})
def _get_async_hook(self) -> AsyncDataflowHook:
return AsyncDataflowHook(
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_sleep,
impersonation_chain=self.impersonation_chain,
cancel_timeout=self.cancel_timeout,
)
| DataflowStartYamlJobTrigger |
python | ansible__ansible | test/lib/ansible_test/_internal/provisioning.py | {
"start": 920,
"end": 7508
} | class ____:
"""State of hosts and profiles to be passed to ansible-test during delegation."""
controller_profile: ControllerHostProfile
target_profiles: list[HostProfile]
@property
def profiles(self) -> list[HostProfile]:
"""Return all the profiles as a list."""
return [t.cast(HostProfile, self.controller_profile)] + self.target_profiles
def serialize(self, path: str) -> None:
"""Serialize the host state to the given path."""
with open_binary_file(path, 'wb') as state_file:
pickle.dump(self, state_file)
@staticmethod
def deserialize(args: EnvironmentConfig, path: str) -> HostState:
"""Deserialize host state from the given args and path."""
with open_binary_file(path) as state_file:
host_state: HostState = pickle.load(state_file)
host_state.controller_profile.args = args
for target in host_state.target_profiles:
target.args = args
return host_state
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing all target hosts from the controller."""
return list(itertools.chain.from_iterable([target.get_controller_target_connections() for
target in self.target_profiles if isinstance(target, SshTargetHostProfile)]))
def targets[THostProfile: HostProfile](self, profile_type: t.Type[THostProfile]) -> list[THostProfile]:
"""The list of target(s), verified to be of the specified type."""
if not self.target_profiles:
raise Exception('No target profiles found.')
assert type_guard(self.target_profiles, profile_type)
return t.cast(list[THostProfile], self.target_profiles)
def prepare_profiles[TEnvironmentConfig: EnvironmentConfig](
args: TEnvironmentConfig,
targets_use_pypi: bool = False,
skip_setup: bool = False,
requirements: t.Optional[c.Callable[[HostProfile], None]] = None,
) -> HostState:
"""
Create new profiles, or load existing ones, and return them.
If a requirements callback was provided, it will be used before configuring hosts if delegation has already been performed.
"""
if args.host_path:
host_state = HostState.deserialize(args, os.path.join(args.host_path, 'state.dat'))
else:
run_pypi_proxy(args, targets_use_pypi)
controller_host_profile = t.cast(ControllerHostProfile, create_host_profile(args, args.controller, None))
host_state = HostState(
controller_profile=controller_host_profile,
target_profiles=[create_host_profile(args, target, controller_host_profile) for target in args.targets],
)
if args.prime_containers:
for host_profile in host_state.profiles:
if isinstance(host_profile, DockerProfile):
host_profile.provision()
raise PrimeContainers()
ExitHandler.register(functools.partial(cleanup_profiles, host_state))
for pre_profile in host_state.profiles:
pre_profile.pre_provision()
def provision(profile: HostProfile) -> None:
"""Provision the given profile."""
profile.provision()
if not skip_setup:
profile.setup()
dispatch_jobs(
[(profile, WrappedThread(functools.partial(provision, profile), f'Provision: {profile}')) for profile in host_state.profiles]
)
host_state.controller_profile.configure()
if not args.delegate:
check_controller_python(args, host_state)
if requirements:
requirements(host_state.controller_profile)
def configure(profile: HostProfile) -> None:
"""Configure the given profile."""
profile.wait()
if not skip_setup:
profile.configure()
if requirements:
requirements(profile)
dispatch_jobs(
[(profile, WrappedThread(functools.partial(configure, profile), f'Configure: {profile}')) for profile in host_state.target_profiles]
)
return host_state
def check_controller_python(args: EnvironmentConfig, host_state: HostState) -> None:
"""Check the running environment to make sure it is what we expected."""
sys_version = version_to_str(sys.version_info[:2])
controller_python = host_state.controller_profile.python
if expected_executable := verify_sys_executable(controller_python.path):
raise ApplicationError(f'Running under Python interpreter "{sys.executable}" instead of "{expected_executable}".')
expected_version = controller_python.version
if expected_version != sys_version:
raise ApplicationError(f'Running under Python version {sys_version} instead of {expected_version}.')
args.controller_python = controller_python
def cleanup_profiles(host_state: HostState) -> None:
"""Cleanup provisioned hosts when exiting."""
for profile in host_state.profiles:
profile.deprovision()
def dispatch_jobs(jobs: list[tuple[HostProfile, WrappedThread]]) -> None:
"""Run the given profile job threads and wait for them to complete."""
for profile, thread in jobs:
thread.daemon = True
thread.start()
while any(thread.is_alive() for profile, thread in jobs):
time.sleep(1)
failed = False
connection_failures = 0
for profile, thread in jobs:
try:
thread.wait_for_result()
except HostConnectionError as ex:
display.error(f'Host {profile.config} connection failed:\n{ex}')
failed = True
connection_failures += 1
except ApplicationError as ex:
display.error(f'Host {profile.config} job failed:\n{ex}')
failed = True
except Exception as ex: # pylint: disable=broad-except
name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}'
display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n'
f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}')
failed = True
if connection_failures:
raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.')
if failed:
raise ApplicationError('Host job(s) failed. See previous error(s) for details.')
| HostState |
python | eventlet__eventlet | eventlet/hubs/asyncio.py | {
"start": 879,
"end": 5961
} | class ____(hub.BaseHub):
"""An Eventlet hub implementation on top of an asyncio event loop."""
def __init__(self):
super().__init__()
# Pre-emptively make sure we're using the right modules:
_unmonkey_patch_asyncio_all()
# The presumption is that eventlet is driving the event loop, so we
# want a new one we control.
import asyncio
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.sleep_event = asyncio.Event()
import asyncio.events
if hasattr(asyncio.events, "on_fork"):
# Allow post-fork() child to continue using the same event loop.
# This is a terrible idea.
asyncio.events.on_fork.__code__ = (lambda: None).__code__
else:
# On Python 3.9-3.11, there's a thread local we need to reset.
# Also a terrible idea.
def re_register_loop(loop=self.loop):
asyncio.events._set_running_loop(loop)
os.register_at_fork(after_in_child=re_register_loop)
def add_timer(self, timer):
"""
Register a ``Timer``.
Typically not called directly by users.
"""
super().add_timer(timer)
self.sleep_event.set()
def _file_cb(self, cb, fileno):
"""
Callback called by ``asyncio`` when a file descriptor has an event.
"""
try:
cb(fileno)
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
self.sleep_event.set()
def add(self, evtype, fileno, cb, tb, mark_as_closed):
"""
Add a file descriptor of given event type to the ``Hub``. See the
superclass for details.
Typically not called directly by users.
"""
try:
os.fstat(fileno)
except OSError:
raise ValueError("Invalid file descriptor")
already_listening = self.listeners[evtype].get(fileno) is not None
listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
if not already_listening:
if evtype == hub.READ:
self.loop.add_reader(fileno, self._file_cb, cb, fileno)
else:
self.loop.add_writer(fileno, self._file_cb, cb, fileno)
return listener
def remove(self, listener):
"""
Remove a listener from the ``Hub``. See the superclass for details.
Typically not called directly by users.
"""
super().remove(listener)
evtype = listener.evtype
fileno = listener.fileno
if not self.listeners[evtype].get(fileno):
if evtype == hub.READ:
self.loop.remove_reader(fileno)
else:
self.loop.remove_writer(fileno)
def remove_descriptor(self, fileno):
"""
Remove a file descriptor from the ``asyncio`` loop.
Typically not called directly by users.
"""
have_read = self.listeners[hub.READ].get(fileno)
have_write = self.listeners[hub.WRITE].get(fileno)
super().remove_descriptor(fileno)
if have_read:
self.loop.remove_reader(fileno)
if have_write:
self.loop.remove_writer(fileno)
def run(self, *a, **kw):
"""
Start the ``Hub`` running. See the superclass for details.
"""
import asyncio
async def async_run():
if self.running:
raise RuntimeError("Already running!")
try:
self.running = True
self.stopping = False
while not self.stopping:
while self.closed:
# We ditch all of these first.
self.close_one()
self.prepare_timers()
if self.debug_blocking:
self.block_detect_pre()
self.fire_timers(self.clock())
if self.debug_blocking:
self.block_detect_post()
self.prepare_timers()
wakeup_when = self.sleep_until()
if wakeup_when is None:
sleep_time = self.default_sleep()
else:
sleep_time = wakeup_when - self.clock()
if sleep_time > 0:
try:
await asyncio.wait_for(self.sleep_event.wait(), sleep_time)
except asyncio.TimeoutError:
pass
self.sleep_event.clear()
else:
await asyncio.sleep(0)
else:
self.timers_canceled = 0
del self.timers[:]
del self.next_timers[:]
finally:
self.running = False
self.stopping = False
self.loop.run_until_complete(async_run())
| Hub |
python | doocs__leetcode | solution/1300-1399/1354.Construct Target Array With Multiple Sums/Solution.py | {
"start": 0,
"end": 398
} | class ____:
def isPossible(self, target: List[int]) -> bool:
s = sum(target)
pq = [-x for x in target]
heapify(pq)
while -pq[0] > 1:
mx = -heappop(pq)
t = s - mx
if t == 0 or mx - t < 1:
return False
x = (mx % t) or t
heappush(pq, -x)
s = s - mx + x
return True
| Solution |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 4984,
"end": 5659
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs) -> TagValueSerializerResponse:
from sentry import tagstore
key = tagstore.backend.get_standardized_key(obj.key)
serialized: TagValueSerializerResponse = {
"key": key,
"name": tagstore.backend.get_tag_value_label(obj.key, obj.value),
"value": obj.value,
"count": obj.times_seen,
"lastSeen": obj.last_seen,
"firstSeen": obj.first_seen,
}
query = convert_user_tag_to_query(key, obj.value)
if query:
serialized["query"] = query
return serialized
| TagValueSerializer |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_dataproc.py | {
"start": 13222,
"end": 17670
} | class ____:
def test_async_create_batch_trigger_serialization_should_execute_successfully(self, batch_trigger):
"""
Asserts that the DataprocBatchTrigger correctly serializes its arguments
and classpath.
"""
classpath, kwargs = batch_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger"
assert kwargs == {
"batch_id": TEST_BATCH_ID,
"project_id": TEST_PROJECT_ID,
"region": TEST_REGION,
"gcp_conn_id": TEST_GCP_CONN_ID,
"impersonation_chain": None,
"polling_interval_seconds": TEST_POLL_INTERVAL,
}
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger.get_async_hook")
async def test_async_create_batch_trigger_triggers_on_success_should_execute_successfully(
self, mock_get_async_hook, batch_trigger
):
"""
Tests the DataprocBatchTrigger only fires once the batch execution reaches a successful state.
"""
mock_batch = mock.MagicMock()
mock_batch.state = Batch.State.SUCCEEDED
mock_batch.state_message = TEST_BATCH_STATE_MESSAGE
future = asyncio.Future()
future.set_result(mock_batch)
mock_get_async_hook.return_value.get_batch.return_value = future
expected_event = TriggerEvent(
{
"batch_id": TEST_BATCH_ID,
"batch_state": Batch.State.SUCCEEDED.name,
"batch_state_message": TEST_BATCH_STATE_MESSAGE,
}
)
actual_event = await batch_trigger.run().asend(None)
await asyncio.sleep(0.5)
assert expected_event == actual_event
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger.get_async_hook")
async def test_async_create_batch_trigger_run_returns_failed_event(
self, mock_get_async_hook, batch_trigger
):
mock_batch = mock.MagicMock()
mock_batch.state = Batch.State.FAILED
mock_batch.state_message = TEST_BATCH_STATE_MESSAGE
future = asyncio.Future()
future.set_result(mock_batch)
mock_get_async_hook.return_value.get_batch.return_value = future
expected_event = TriggerEvent(
{
"batch_id": TEST_BATCH_ID,
"batch_state": Batch.State.FAILED.name,
"batch_state_message": TEST_BATCH_STATE_MESSAGE,
}
)
actual_event = await batch_trigger.run().asend(None)
await asyncio.sleep(0.5)
assert expected_event == actual_event
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger.get_async_hook")
async def test_create_batch_run_returns_cancelled_event(self, mock_get_async_hook, batch_trigger):
mock_batch = mock.MagicMock()
mock_batch.state = Batch.State.CANCELLED
mock_batch.state_message = TEST_BATCH_STATE_MESSAGE
future = asyncio.Future()
future.set_result(mock_batch)
mock_get_async_hook.return_value.get_batch.return_value = future
expected_event = TriggerEvent(
{
"batch_id": TEST_BATCH_ID,
"batch_state": Batch.State.CANCELLED.name,
"batch_state_message": TEST_BATCH_STATE_MESSAGE,
}
)
actual_event = await batch_trigger.run().asend(None)
await asyncio.sleep(0.5)
assert expected_event == actual_event
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger.get_async_hook")
@mock.patch.object(DataprocBatchTrigger, "log")
async def test_create_batch_run_loop_is_still_running(self, mock_log, mock_get_async_hook, batch_trigger):
mock_batch = mock.MagicMock()
mock_batch.state = Batch.State.RUNNING
future = asyncio.Future()
future.set_result(mock_batch)
mock_get_async_hook.return_value.get_batch.return_value = future
task = asyncio.create_task(batch_trigger.run().__anext__())
await asyncio.sleep(0.5)
assert not task.done()
mock_log.info.assert_called()
| TestDataprocBatchTrigger |
python | pandas-dev__pandas | pandas/tests/series/test_missing.py | {
"start": 230,
"end": 2593
} | class ____:
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_timedelta64_nan(self):
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
assert isna(td1[0])
assert td1[0]._value == iNaT
td1[0] = td[0]
assert not isna(td1[0])
# GH#16674 iNaT is treated as an integer when given by the user
with pytest.raises(TypeError, match="Invalid value"):
td1[1] = iNaT
td1[2] = NaT
assert isna(td1[2])
assert td1[2]._value == iNaT
td1[2] = td[2]
assert not isna(td1[2])
# boolean setting
# GH#2899 boolean setting
td3 = np.timedelta64(timedelta(days=3))
td7 = np.timedelta64(timedelta(days=7))
td[(td > td3) & (td < td7)] = np.nan
assert isna(td).sum() == 3
@pytest.mark.xfail(
reason="Chained inequality raises when trying to define 'selector'"
)
def test_logical_range_select(self, datetime_series):
# NumPy limitation =(
# https://github.com/pandas-dev/pandas/commit/9030dc021f07c76809848925cb34828f6c8484f3
selector = -0.5 <= datetime_series <= 0.5
expected = (datetime_series >= -0.5) & (datetime_series <= 0.5)
tm.assert_series_equal(selector, expected)
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts.index = ts.index._with_freq(None)
ts[::2] = np.nan
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notna(ts)])
def test_hasnans_uncached_for_series():
# GH#19700
# set float64 dtype to avoid upcast when setting nan
idx = Index([0, 1], dtype="float64")
assert idx.hasnans is False
assert "hasnans" in idx._cache
ser = idx.to_series()
assert ser.hasnans is False
assert not hasattr(ser, "_cache")
ser.iloc[-1] = np.nan
assert ser.hasnans is True
| TestSeriesMissingData |
python | django__django | tests/aggregation_regress/tests.py | {
"start": 884,
"end": 70015
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name="Apress", num_awards=3)
cls.p2 = Publisher.objects.create(name="Sams", num_awards=1)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = HardbackBook.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
weight=4.5,
)
cls.b6 = HardbackBook.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
weight=3.7,
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.items():
self.assertEqual(getattr(obj, attr), value)
def test_annotation_with_value(self):
values = (
Book.objects.filter(
name="Practical Django Projects",
)
.annotate(
discount_price=F("price") * 2,
)
.values(
"discount_price",
)
.annotate(sum_discount=Sum("discount_price"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
values,
[
{
"discount_price": Decimal("59.38"),
"sum_discount": Decimal("59.38"),
}
],
)
if connection.features.allows_group_by_select_index:
self.assertIn("GROUP BY 1", ctx[0]["sql"])
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
The subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values("contact").annotate(Max("id"))
qs = qs.order_by("contact").values_list("id__max", flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by("id")
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(
select={
"name_of_shortest_book": shortest_book_sql,
}
).annotate(total_books=Count("book"))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)},
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum("pages"), Avg("pages")),
{"pages__sum": 3703, "pages__avg": Approximate(617.166, places=2)},
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={"price_per_page": "price / pages"}).aggregate(
Sum("pages")
),
{"pages__sum": 3703},
)
    def test_annotation(self):
        """
        annotate() composes with extra(select=...) and values() in any order;
        the clause order only affects which keys appear in values() output.
        """
        # Annotations get combined with extra select clauses
        obj = (
            Book.objects.annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"manufacture_cost": "price * .5"})
            .get(pk=self.b2.pk)
        )
        self.assertObjectAttrs(
            obj,
            contact_id=self.a3.id,
            isbn="067232959",
            mean_auth_age=45.0,
            name="Sams Teach Yourself Django in 24 Hours",
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=self.p2.id,
            rating=3.0,
        )
        # Different DB backends return different types for the extra select
        # computation
        self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
        # Order of the annotate/extra in the query doesn't matter
        obj = (
            Book.objects.extra(select={"manufacture_cost": "price * .5"})
            .annotate(mean_auth_age=Avg("authors__age"))
            .get(pk=self.b2.pk)
        )
        self.assertObjectAttrs(
            obj,
            contact_id=self.a3.id,
            isbn="067232959",
            mean_auth_age=45.0,
            name="Sams Teach Yourself Django in 24 Hours",
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=self.p2.id,
            rating=3.0,
        )
        # Different DB backends return different types for the extra select
        # computation
        self.assertIn(obj.manufacture_cost, (11.545, Decimal("11.545")))
        # Values queries can be combined with annotate and extra
        obj = (
            Book.objects.annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"manufacture_cost": "price * .5"})
            .values()
            .get(pk=self.b2.pk)
        )
        manufacture_cost = obj["manufacture_cost"]
        self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
        del obj["manufacture_cost"]
        self.assertEqual(
            obj,
            {
                "id": self.b2.id,
                "contact_id": self.a3.id,
                "isbn": "067232959",
                "mean_auth_age": 45.0,
                "name": "Sams Teach Yourself Django in 24 Hours",
                "pages": 528,
                "price": Decimal("23.09"),
                "pubdate": datetime.date(2008, 3, 3),
                "publisher_id": self.p2.id,
                "rating": 3.0,
            },
        )
        # The order of the (empty) values, annotate and extra clauses doesn't
        # matter
        obj = (
            Book.objects.values()
            .annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"manufacture_cost": "price * .5"})
            .get(pk=self.b2.pk)
        )
        manufacture_cost = obj["manufacture_cost"]
        self.assertIn(manufacture_cost, (11.545, Decimal("11.545")))
        del obj["manufacture_cost"]
        self.assertEqual(
            obj,
            {
                "id": self.b2.id,
                "contact_id": self.a3.id,
                "isbn": "067232959",
                "mean_auth_age": 45.0,
                "name": "Sams Teach Yourself Django in 24 Hours",
                "pages": 528,
                "price": Decimal("23.09"),
                "pubdate": datetime.date(2008, 3, 3),
                "publisher_id": self.p2.id,
                "rating": 3.0,
            },
        )
        # If the annotation precedes the values clause, it won't be included
        # unless it is explicitly named
        obj = (
            Book.objects.annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"price_per_page": "price / pages"})
            .values("name")
            .get(pk=self.b1.pk)
        )
        self.assertEqual(
            obj,
            {
                "name": "The Definitive Guide to Django: Web Development Done Right",
            },
        )
        obj = (
            Book.objects.annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"price_per_page": "price / pages"})
            .values("name", "mean_auth_age")
            .get(pk=self.b1.pk)
        )
        self.assertEqual(
            obj,
            {
                "mean_auth_age": 34.5,
                "name": "The Definitive Guide to Django: Web Development Done Right",
            },
        )
        # If an annotation isn't included in the values, it can still be used
        # in a filter
        qs = (
            Book.objects.annotate(n_authors=Count("authors"))
            .values("name")
            .filter(n_authors__gt=2)
        )
        self.assertSequenceEqual(
            qs,
            [{"name": "Python Web Development with Django"}],
        )
        # The annotations are added to values output if values() precedes
        # annotate()
        obj = (
            Book.objects.values("name")
            .annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"price_per_page": "price / pages"})
            .get(pk=self.b1.pk)
        )
        self.assertEqual(
            obj,
            {
                "mean_auth_age": 34.5,
                "name": "The Definitive Guide to Django: Web Development Done Right",
            },
        )
        # All of the objects are getting counted (allow_nulls) and that values
        # respects the amount of objects
        self.assertEqual(len(Author.objects.annotate(Avg("friends__age")).values()), 9)
        # Consecutive calls to annotate accumulate in the query
        qs = (
            Book.objects.values("price")
            .annotate(oldest=Max("authors__age"))
            .order_by("oldest", "price")
            .annotate(Max("publisher__num_awards"))
        )
        self.assertSequenceEqual(
            qs,
            [
                {"price": Decimal("30"), "oldest": 35, "publisher__num_awards__max": 3},
                {
                    "price": Decimal("29.69"),
                    "oldest": 37,
                    "publisher__num_awards__max": 7,
                },
                {
                    "price": Decimal("23.09"),
                    "oldest": 45,
                    "publisher__num_awards__max": 1,
                },
                {"price": Decimal("75"), "oldest": 57, "publisher__num_awards__max": 9},
                {
                    "price": Decimal("82.8"),
                    "oldest": 57,
                    "publisher__num_awards__max": 7,
                },
            ],
        )
    def test_aggregate_annotation(self):
        """Aggregates can be composed over annotations."""
        # The return type is derived from the composed aggregate
        vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
            Max("pages"), Max("price"), Sum("num_authors"), Avg("num_authors")
        )
        self.assertEqual(
            vals,
            {
                "num_authors__sum": 10,
                "num_authors__avg": Approximate(1.666, places=2),
                "pages__max": 1132,
                "price__max": Decimal("82.80"),
            },
        )
        # Regression for #15624 - Missing SELECT columns when using values,
        # annotate and aggregate in a single query
        self.assertEqual(
            Book.objects.annotate(c=Count("authors")).values("c").aggregate(Max("c")),
            {"c__max": 3},
        )
def test_conditional_aggregate(self):
# Conditional aggregation of a grouped queryset.
self.assertEqual(
Book.objects.annotate(c=Count("authors"))
.values("pk")
.aggregate(test=Sum(Case(When(c__gt=1, then=1))))["test"],
3,
)
def test_sliced_conditional_aggregate(self):
self.assertEqual(
Author.objects.order_by("pk")[:5].aggregate(
test=Sum(Case(When(age__lte=35, then=1)))
)["test"],
3,
)
def test_annotated_conditional_aggregate(self):
annotated_qs = Book.objects.annotate(
discount_price=F("price") * Decimal("0.75")
)
self.assertAlmostEqual(
annotated_qs.aggregate(
test=Avg(
Case(
When(pages__lt=400, then="discount_price"),
output_field=DecimalField(),
)
)
)["test"],
Decimal("22.27"),
places=2,
)
def test_distinct_conditional_aggregate(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(price=Decimal("29.69"), then="pages"),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_conditional_aggregate_on_complex_condition(self):
self.assertEqual(
Book.objects.distinct().aggregate(
test=Avg(
Case(
When(
Q(price__gte=Decimal("29")) & Q(price__lt=Decimal("30")),
then="pages",
),
output_field=IntegerField(),
)
)
)["test"],
325,
)
def test_q_annotation_aggregate(self):
self.assertEqual(Book.objects.annotate(has_pk=Q(pk__isnull=False)).count(), 6)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__gt=Decimal(40)
)
),
1,
)
self.assertEqual(
len(
Author.objects.annotate(sum=Sum("book_contact_set__price")).filter(
sum__lte=Decimal(40)
)
),
4,
)
    def test_field_error(self):
        """Bad field requests in aggregates are caught and reported."""
        msg = (
            "Cannot resolve keyword 'foo' into field. Choices are: authors, "
            "contact, contact_id, hardbackbook, id, isbn, name, pages, price, "
            "pubdate, publisher, publisher_id, rating, store, tags"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Book.objects.aggregate(num_authors=Count("foo"))
        with self.assertRaisesMessage(FieldError, msg):
            Book.objects.annotate(num_authors=Count("foo"))
        # After annotating, the annotation name appears among the choices.
        msg = (
            "Cannot resolve keyword 'foo' into field. Choices are: authors, "
            "contact, contact_id, hardbackbook, id, isbn, name, num_authors, "
            "pages, price, pubdate, publisher, publisher_id, rating, store, tags"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
                Max("foo")
            )
    def test_more(self):
        """Mixed count()/aggregate() behavior, select_related, and exclude()."""
        # Old-style count aggregations can be mixed with new-style
        self.assertEqual(Book.objects.annotate(num_authors=Count("authors")).count(), 6)
        # Non-ordinal, non-computed Aggregates over annotations correctly
        # inherit the annotation's internal type if the annotation is ordinal
        # or computed
        vals = Book.objects.annotate(num_authors=Count("authors")).aggregate(
            Max("num_authors")
        )
        self.assertEqual(vals, {"num_authors__max": 3})
        vals = Publisher.objects.annotate(avg_price=Avg("book__price")).aggregate(
            Max("avg_price")
        )
        self.assertEqual(vals, {"avg_price__max": 75.0})
        # Aliases are quoted to protected aliases that might be reserved names
        vals = Book.objects.aggregate(number=Max("pages"), select=Max("pages"))
        self.assertEqual(vals, {"number": 1132, "select": 1132})
        # Regression for #10064: select_related() plays nice with aggregates
        obj = (
            Book.objects.select_related("publisher")
            .annotate(num_authors=Count("authors"))
            .values()
            .get(isbn="013790395")
        )
        self.assertEqual(
            obj,
            {
                "contact_id": self.a8.id,
                "id": self.b5.id,
                "isbn": "013790395",
                "name": "Artificial Intelligence: A Modern Approach",
                "num_authors": 2,
                "pages": 1132,
                "price": Decimal("82.8"),
                "pubdate": datetime.date(1995, 1, 15),
                "publisher_id": self.p3.id,
                "rating": 4.0,
            },
        )
        # Regression for #10010: exclude on an aggregate field is correctly
        # negated
        self.assertEqual(len(Book.objects.annotate(num_authors=Count("authors"))), 6)
        self.assertEqual(
            len(
                Book.objects.annotate(num_authors=Count("authors")).filter(
                    num_authors__gt=2
                )
            ),
            1,
        )
        self.assertEqual(
            len(
                Book.objects.annotate(num_authors=Count("authors")).exclude(
                    num_authors__gt=2
                )
            ),
            5,
        )
        self.assertEqual(
            len(
                Book.objects.annotate(num_authors=Count("authors"))
                .filter(num_authors__lt=3)
                .exclude(num_authors__lt=2)
            ),
            2,
        )
        self.assertEqual(
            len(
                Book.objects.annotate(num_authors=Count("authors"))
                .exclude(num_authors__lt=2)
                .filter(num_authors__lt=3)
            ),
            2,
        )
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_awards__gt=2 * F("num_books"))
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 1, "name": "Morgan Kaufmann", "num_awards": 9},
{"num_books": 2, "name": "Prentice Hall", "num_awards": 7},
],
)
qs = (
Publisher.objects.annotate(num_books=Count("book"))
.exclude(num_books__lt=F("num_awards") / 2)
.order_by("name")
.values("name", "num_books", "num_awards")
)
self.assertSequenceEqual(
qs,
[
{"num_books": 2, "name": "Apress", "num_awards": 3},
{"num_books": 0, "name": "Jonno's House of Books", "num_awards": 0},
{"num_books": 1, "name": "Sams", "num_awards": 1},
],
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values("EntryID__Entry").annotate(
Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True)
)
self.assertSequenceEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count("clues__ID"))
self.assertSequenceEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry="foo")
c = Clues.objects.create(EntryID=e, Clue="bar")
qs = Clues.objects.select_related("EntryID").annotate(Count("ID"))
self.assertSequenceEqual(qs, [c])
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
    def test_empty(self):
        """
        Regression for #10089: Check handling of empty result sets with
        aggregates.
        """
        self.assertEqual(Book.objects.filter(id__in=[]).count(), 0)
        # Count() of nothing is 0; every other aggregate is None.
        vals = Book.objects.filter(id__in=[]).aggregate(
            num_authors=Count("authors"),
            avg_authors=Avg("authors"),
            max_authors=Max("authors"),
            max_price=Max("price"),
            max_rating=Max("rating"),
        )
        self.assertEqual(
            vals,
            {
                "max_authors": None,
                "max_rating": None,
                "num_authors": 0,
                "avg_authors": None,
                "max_price": None,
            },
        )
        # Same behavior per-row for a publisher with no books.
        qs = (
            Publisher.objects.filter(name="Jonno's House of Books")
            .annotate(
                num_authors=Count("book__authors"),
                avg_authors=Avg("book__authors"),
                max_authors=Max("book__authors"),
                max_price=Max("book__price"),
                max_rating=Max("book__rating"),
            )
            .values()
        )
        self.assertSequenceEqual(
            qs,
            [
                {
                    "max_authors": None,
                    "name": "Jonno's House of Books",
                    "num_awards": 0,
                    "max_price": None,
                    "num_authors": 0,
                    "max_rating": None,
                    "id": self.p5.id,
                    "avg_authors": None,
                }
            ],
        )
    def test_more_more(self):
        """Grouping/ordering regressions: #10113, #10127, #10132, #10182, #15709."""
        # Regression for #10113 - Fields mentioned in order_by() must be
        # included in the GROUP BY. This only becomes a problem when the
        # order_by introduces a new join.
        self.assertQuerySetEqual(
            Book.objects.annotate(num_authors=Count("authors")).order_by(
                "publisher__name", "name"
            ),
            [
                "Practical Django Projects",
                "The Definitive Guide to Django: Web Development Done Right",
                "Paradigms of Artificial Intelligence Programming: Case Studies in "
                "Common Lisp",
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "Sams Teach Yourself Django in 24 Hours",
            ],
            lambda b: b.name,
        )
        # Regression for #10127 - Empty select_related() works with annotate
        qs = (
            Book.objects.filter(rating__lt=4.5)
            .select_related()
            .annotate(Avg("authors__age"))
            .order_by("name")
        )
        self.assertQuerySetEqual(
            qs,
            [
                (
                    "Artificial Intelligence: A Modern Approach",
                    51.5,
                    "Prentice Hall",
                    "Peter Norvig",
                ),
                ("Practical Django Projects", 29.0, "Apress", "James Bennett"),
                (
                    "Python Web Development with Django",
                    Approximate(30.333, places=2),
                    "Prentice Hall",
                    "Jeffrey Forcier",
                ),
                ("Sams Teach Yourself Django in 24 Hours", 45.0, "Sams", "Brad Dayley"),
            ],
            lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name),
        )
        # Regression for #10132 - If the values() clause only mentioned extra
        # (select=) columns, those columns are used for grouping
        qs = (
            Book.objects.extra(select={"pub": "publisher_id"})
            .values("pub")
            .annotate(Count("id"))
            .order_by("pub")
        )
        self.assertSequenceEqual(
            qs,
            [
                {"pub": self.p1.id, "id__count": 2},
                {"pub": self.p2.id, "id__count": 1},
                {"pub": self.p3.id, "id__count": 2},
                {"pub": self.p4.id, "id__count": 1},
            ],
        )
        qs = (
            Book.objects.extra(select={"pub": "publisher_id", "foo": "pages"})
            .values("pub")
            .annotate(Count("id"))
            .order_by("pub")
        )
        self.assertSequenceEqual(
            qs,
            [
                {"pub": self.p1.id, "id__count": 2},
                {"pub": self.p2.id, "id__count": 1},
                {"pub": self.p3.id, "id__count": 2},
                {"pub": self.p4.id, "id__count": 1},
            ],
        )
        # Regression for #10182 - Queries with aggregate calls are correctly
        # realiased when used in a subquery
        ids = (
            Book.objects.filter(pages__gt=100)
            .annotate(n_authors=Count("authors"))
            .filter(n_authors__gt=2)
            .order_by("n_authors")
        )
        self.assertQuerySetEqual(
            Book.objects.filter(id__in=ids),
            [
                "Python Web Development with Django",
            ],
            lambda b: b.name,
        )
        # Regression for #15709 - Ensure each group_by field only exists once
        # per query
        qstr = str(
            Book.objects.values("publisher")
            .annotate(max_pages=Max("pages"))
            .order_by()
            .query
        )
        # There is just one GROUP BY clause (zero commas means at most one
        # clause).
        self.assertEqual(qstr[qstr.index("GROUP BY") :].count(", "), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises
# ValueError.
msg = (
"The named annotation 'authors__age__avg' conflicts with "
"the default name for another annotation."
)
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(
Avg("authors__age"), authors__age__avg=Avg("authors__age")
)
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a field name on the model raises ValueError
msg = "The annotation 'age' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(age=Avg("friends__age"))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with an m2m name on the model raises ValueError
msg = "The annotation 'friends' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(friends=Count("friends"))
def test_fk_attname_conflict(self):
msg = "The annotation 'contact_id' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Book.objects.annotate(contact_id=F("publisher_id"))
    def test_values_queryset_non_conflict(self):
        """
        If you're using a values query set, some potential conflicts are
        avoided.
        """
        # age is a field on Author, so it shouldn't be allowed as an aggregate.
        # But age isn't included in values(), so it is.
        results = (
            Author.objects.values("name")
            .annotate(age=Count("book_contact_set"))
            .order_by("name")
        )
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]["name"], "Adrian Holovaty")
        self.assertEqual(results[0]["age"], 1)
        # Same problem, but aggregating over m2m fields
        results = (
            Author.objects.values("name")
            .annotate(age=Avg("friends__age"))
            .order_by("name")
        )
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]["name"], "Adrian Holovaty")
        self.assertEqual(results[0]["age"], 32.0)
        # Same problem, but colliding with an m2m field
        results = (
            Author.objects.values("name")
            .annotate(friends=Count("friends"))
            .order_by("name")
        )
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]["name"], "Adrian Holovaty")
        self.assertEqual(results[0]["friends"], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name
# that conflicts with a reverse-related name on the model raises
# ValueError
msg = "The annotation 'book_contact_set' conflicts with a field on the model."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(book_contact_set=Avg("friends__age"))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count("authors"))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
    def test_more_more_more(self):
        """More regressions: #10199, #10248, #10290, #10425, #10666, #10766."""
        # Regression for #10199 - Aggregate calls clone the original query so
        # the original query can still be used
        books = Book.objects.all()
        books.aggregate(Avg("authors__age"))
        self.assertQuerySetEqual(
            books.all(),
            [
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in "
                "Common Lisp",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name,
        )
        # Regression for #10248 - Annotations work with dates()
        qs = (
            Book.objects.annotate(num_authors=Count("authors"))
            .filter(num_authors=2)
            .dates("pubdate", "day")
        )
        self.assertSequenceEqual(
            qs,
            [
                datetime.date(1995, 1, 15),
                datetime.date(2007, 12, 6),
            ],
        )
        # Regression for #10290 - extra selects with parameters can be used for
        # grouping.
        qs = (
            Book.objects.annotate(mean_auth_age=Avg("authors__age"))
            .extra(select={"sheets": "(pages + %s) / %s"}, select_params=[1, 2])
            .order_by("sheets")
            .values("sheets")
        )
        self.assertQuerySetEqual(
            qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"])
        )
        # Regression for 10425 - annotations don't get in the way of a count()
        # clause
        self.assertEqual(
            Book.objects.values("publisher").annotate(Count("publisher")).count(), 4
        )
        self.assertEqual(
            Book.objects.annotate(Count("publisher")).values("publisher").count(), 6
        )
        # Note: intentionally no order_by(), that case needs tests, too.
        publishers = Publisher.objects.filter(id__in=[self.p1.id, self.p2.id])
        self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
        publishers = publishers.annotate(n_books=Count("book"))
        sorted_publishers = sorted(publishers, key=lambda x: x.name)
        self.assertEqual(sorted_publishers[0].n_books, 2)
        self.assertEqual(sorted_publishers[1].n_books, 1)
        self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
        books = Book.objects.filter(publisher__in=publishers)
        self.assertQuerySetEqual(
            books,
            [
                "Practical Django Projects",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name,
        )
        self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
        # Regression for 10666 - inherited fields work with annotations and
        # aggregations
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum("book_ptr__pages")),
            {"n_pages": 2078},
        )
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum("pages")),
            {"n_pages": 2078},
        )
        qs = (
            HardbackBook.objects.annotate(
                n_authors=Count("book_ptr__authors"),
            )
            .values("name", "n_authors")
            .order_by("name")
        )
        self.assertSequenceEqual(
            qs,
            [
                {"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
                {
                    "n_authors": 1,
                    "name": (
                        "Paradigms of Artificial Intelligence Programming: Case "
                        "Studies in Common Lisp"
                    ),
                },
            ],
        )
        qs = (
            HardbackBook.objects.annotate(n_authors=Count("authors"))
            .values("name", "n_authors")
            .order_by("name")
        )
        self.assertSequenceEqual(
            qs,
            [
                {"n_authors": 2, "name": "Artificial Intelligence: A Modern Approach"},
                {
                    "n_authors": 1,
                    "name": (
                        "Paradigms of Artificial Intelligence Programming: Case "
                        "Studies in Common Lisp"
                    ),
                },
            ],
        )
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # fields in an aggregate() call.
        msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate"
        with self.assertRaisesMessage(FieldError, msg):
            Book.objects.annotate(mean_age=Avg("authors__age")).annotate(
                Avg("mean_age")
            )
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[])
.annotate(Count("friends"))
.aggregate(Count("pk")),
{"pk__count": 0},
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg("age")), {"age__avg": None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name"))
.exclude(friends__name="Joe")
.count(),
Author.objects.count(),
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.filter(pages__lt=F("n_authors") * 200)
.values_list("pk")
)
self.assertQuerySetEqual(
Book.objects.filter(pk__in=qs),
["Python Web Development with Django"],
attrgetter("name"),
)
def test_values_annotate_values(self):
qs = (
Book.objects.values("name")
.annotate(n_authors=Count("authors"))
.values_list("pk", flat=True)
.order_by("name")
)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# When a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = (
Book.objects.values_list("name")
.annotate(n_authors=Count("authors"))
.filter(pages__gt=F("n_authors"))
.values_list("name", flat=True)
.order_by("name")
)
# Results should be the same, all Books have more pages than authors
self.assertEqual(list(qs), list(Book.objects.values_list("name", flat=True)))
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = (
Book.objects.values_list("publisher__name")
.annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
)
.order_by("-publisher__name")
)
self.assertEqual(books[0], ("Sams", 1, Decimal("23.09"), 45.0, 528.0))
    def test_annotation_disjunction(self):
        """Q() disjunctions mixing annotations and plain fields filter correctly."""
        qs = (
            Book.objects.annotate(n_authors=Count("authors"))
            .filter(Q(n_authors=2) | Q(name="Python Web Development with Django"))
            .order_by("name")
        )
        self.assertQuerySetEqual(
            qs,
            [
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name"),
        )
        # A conjunction inside the disjunction that matches nothing.
        qs = (
            Book.objects.annotate(n_authors=Count("authors")).filter(
                Q(name="The Definitive Guide to Django: Web Development Done Right")
                | (
                    Q(name="Artificial Intelligence: A Modern Approach")
                    & Q(n_authors=3)
                )
            )
        ).order_by("name")
        self.assertQuerySetEqual(
            qs,
            [
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name"),
        )
        # NULL aggregates (publisher with no books) pass the isnull branch.
        qs = (
            Publisher.objects.annotate(
                rating_sum=Sum("book__rating"), book_count=Count("book")
            )
            .filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True))
            .order_by("pk")
        )
        self.assertQuerySetEqual(
            qs,
            [
                "Apress",
                "Prentice Hall",
                "Jonno's House of Books",
            ],
            attrgetter("name"),
        )
        # Aggregate-vs-aggregate comparison inside the disjunction.
        qs = (
            Publisher.objects.annotate(
                rating_sum=Sum("book__rating"), book_count=Count("book")
            )
            .filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None))
            .order_by("num_awards")
        )
        self.assertQuerySetEqual(
            qs,
            [
                "Jonno's House of Books",
                "Sams",
                "Apress",
                "Prentice Hall",
                "Morgan Kaufmann",
            ],
            attrgetter("name"),
        )
def test_quoting_aggregate_order_by(self):
qs = (
Book.objects.filter(name="Python Web Development with Django")
.annotate(authorCount=Count("authors"))
.order_by("authorCount")
)
self.assertQuerySetEqual(
qs,
[
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount),
)
    def test_stddev(self):
        """StdDev and Variance (population and sample) over int/float/Decimal."""
        self.assertEqual(
            Book.objects.aggregate(StdDev("pages")),
            {"pages__stddev": Approximate(311.46, 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev("rating")),
            {"rating__stddev": Approximate(0.60, 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev("price")),
            {"price__stddev": Approximate(Decimal("24.16"), 2)},
        )
        # sample=True switches to the sample (n-1 denominator) statistic.
        self.assertEqual(
            Book.objects.aggregate(StdDev("pages", sample=True)),
            {"pages__stddev": Approximate(341.19, 2)},
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev("rating", sample=True)),
            {"rating__stddev": Approximate(0.66, 2)},
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev("price", sample=True)),
            {"price__stddev": Approximate(Decimal("26.46"), 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("pages")),
            {"pages__variance": Approximate(97010.80, 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("rating")),
            {"rating__variance": Approximate(0.36, 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("price")),
            {"price__variance": Approximate(Decimal("583.77"), 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("pages", sample=True)),
            {"pages__variance": Approximate(116412.96, 1)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("rating", sample=True)),
            {"rating__variance": Approximate(0.44, 2)},
        )
        self.assertEqual(
            Book.objects.aggregate(Variance("price", sample=True)),
            {"price__variance": Approximate(Decimal("700.53"), 2)},
        )
    def test_filtering_by_annotation_name(self):
        """Regression test for #14476: filtering by annotation aliases."""
        # The name of the explicitly provided annotation name in this case
        # poses no problem
        qs = (
            Author.objects.annotate(book_cnt=Count("book"))
            .filter(book_cnt=2)
            .order_by("name")
        )
        self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
        # Neither in this case
        qs = (
            Author.objects.annotate(book_count=Count("book"))
            .filter(book_count=2)
            .order_by("name")
        )
        self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
        # This case used to fail because the ORM couldn't resolve the
        # automatically generated annotation name `book__count`
        qs = (
            Author.objects.annotate(Count("book"))
            .filter(book__count=2)
            .order_by("name")
        )
        self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
        # Referencing the auto-generated name in an aggregate() also works.
        self.assertEqual(
            Author.objects.annotate(Count("book")).aggregate(Max("book__count")),
            {"book__count__max": 2},
        )
    def test_annotate_joins(self):
        """
        The base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for
        fk-field in the query, too. Refs #19087.
        """
        qs = Book.objects.annotate(n=Count("pk"))
        # join_type None means the base table, not a LEFT OUTER JOIN.
        self.assertIs(qs.query.alias_map["aggregation_regress_book"].join_type, None)
        # The query executes without problems.
        self.assertEqual(len(qs.exclude(publisher=-1)), 6)
    @skipUnlessDBFeature("allows_group_by_selected_pks")
    def test_aggregate_duplicate_columns(self):
        """Regression test for #17144: GROUP BY collapses to the pk only."""
        results = Author.objects.annotate(num_contacts=Count("book_contact_set"))
        # There should only be one GROUP BY clause, for the `id` column.
        # `name` and `age` should not be grouped on.
        _, _, group_by = results.query.get_compiler(using="default").pre_sql_setup()
        self.assertEqual(len(group_by), 1)
        self.assertIn("id", group_by[0][0])
        self.assertNotIn("name", group_by[0][0])
        self.assertNotIn("age", group_by[0][0])
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by("name")],
            [
                ("Adrian Holovaty", 1),
                ("Brad Dayley", 1),
                ("Jacob Kaplan-Moss", 0),
                ("James Bennett", 1),
                ("Jeffrey Forcier", 1),
                ("Paul Bissex", 0),
                ("Peter Norvig", 2),
                ("Stuart Russell", 0),
                ("Wesley J. Chun", 0),
            ],
        )
    @skipUnlessDBFeature("allows_group_by_selected_pks")
    def test_aggregate_duplicate_columns_only(self):
        """GROUP BY collapses to the pk when combined with only(), too."""
        results = Author.objects.only("id", "name").annotate(
            num_contacts=Count("book_contact_set")
        )
        _, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
        self.assertEqual(len(grouping), 1)
        self.assertIn("id", grouping[0][0])
        self.assertNotIn("name", grouping[0][0])
        self.assertNotIn("age", grouping[0][0])
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by("name")],
            [
                ("Adrian Holovaty", 1),
                ("Brad Dayley", 1),
                ("Jacob Kaplan-Moss", 0),
                ("James Bennett", 1),
                ("Jeffrey Forcier", 1),
                ("Paul Bissex", 0),
                ("Peter Norvig", 2),
                ("Stuart Russell", 0),
                ("Wesley J. Chun", 0),
            ],
        )
    @skipUnlessDBFeature("allows_group_by_selected_pks")
    def test_aggregate_duplicate_columns_select_related(self):
        """GROUP BY uses one pk per joined table when using select_related()."""
        results = Book.objects.select_related("contact").annotate(
            num_authors=Count("authors")
        )
        _, _, grouping = results.query.get_compiler(using="default").pre_sql_setup()
        self.assertEqual(len(grouping), 2)
        self.assertIn("id", grouping[0][0])
        self.assertNotIn("name", grouping[0][0])
        self.assertNotIn("contact", grouping[0][0])
        self.assertEqual(
            [(b.name, b.num_authors) for b in results.order_by("name")],
            [
                ("Artificial Intelligence: A Modern Approach", 2),
                (
                    "Paradigms of Artificial Intelligence Programming: Case Studies in "
                    "Common Lisp",
                    1,
                ),
                ("Practical Django Projects", 1),
                ("Python Web Development with Django", 3),
                ("Sams Teach Yourself Django in 24 Hours", 1),
                ("The Definitive Guide to Django: Web Development Done Right", 2),
            ],
        )
    @skipUnlessDBFeature("allows_group_by_selected_pks")
    def test_aggregate_unmanaged_model_columns(self):
        """
        Unmanaged models are sometimes used to represent database views which
        may not allow grouping by selected primary key.
        """
        def assertQuerysetResults(queryset):
            # Shared expectation: the pk-collapse optimization must not
            # change the query results either way.
            self.assertEqual(
                [(b.name, b.num_authors) for b in queryset.order_by("name")],
                [
                    ("Artificial Intelligence: A Modern Approach", 2),
                    (
                        "Paradigms of Artificial Intelligence Programming: Case "
                        "Studies in Common Lisp",
                        1,
                    ),
                    ("Practical Django Projects", 1),
                    ("Python Web Development with Django", 3),
                    ("Sams Teach Yourself Django in 24 Hours", 1),
                    ("The Definitive Guide to Django: Web Development Done Right", 2),
                ],
            )
        queryset = Book.objects.select_related("contact").annotate(
            num_authors=Count("authors")
        )
        # Unmanaged origin model.
        with mock.patch.object(Book._meta, "managed", False):
            _, _, grouping = queryset.query.get_compiler(
                using="default"
            ).pre_sql_setup()
            self.assertEqual(len(grouping), len(Book._meta.fields) + 1)
            for index, field in enumerate(Book._meta.fields):
                self.assertIn(field.name, grouping[index][0])
            self.assertIn(Author._meta.pk.name, grouping[-1][0])
            assertQuerysetResults(queryset)
        # Unmanaged related model.
        with mock.patch.object(Author._meta, "managed", False):
            _, _, grouping = queryset.query.get_compiler(
                using="default"
            ).pre_sql_setup()
            self.assertEqual(len(grouping), len(Author._meta.fields) + 1)
            self.assertIn(Book._meta.pk.name, grouping[0][0])
            for index, field in enumerate(Author._meta.fields):
                self.assertIn(field.name, grouping[index + 1][0])
            assertQuerysetResults(queryset)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_aggregate_unmanaged_model_as_tables(self):
    """
    Unmanaged models may still be grouped by their selected primary keys
    when the backend explicitly allows it for unmanaged models.
    """
    qs = Book.objects.select_related("contact").annotate(
        num_authors=Count("authors")
    )
    # Force treating unmanaged models as tables.
    with mock.patch(
        "django.db.connection.features.allows_group_by_selected_pks_on_model",
        return_value=True,
    ):
        with (
            mock.patch.object(Book._meta, "managed", False),
            mock.patch.object(Author._meta, "managed", False),
        ):
            _, _, grouping = qs.query.get_compiler(using="default").pre_sql_setup()
            # Grouping collapses to the two pk columns (Book and Author).
            self.assertEqual(len(grouping), 2)
            self.assertIn("id", grouping[0][0])
            self.assertIn("id", grouping[1][0])
            self.assertQuerySetEqual(
                qs.order_by("name"),
                [
                    ("Artificial Intelligence: A Modern Approach", 2),
                    (
                        "Paradigms of Artificial Intelligence Programming: Case "
                        "Studies in Common Lisp",
                        1,
                    ),
                    ("Practical Django Projects", 1),
                    ("Python Web Development with Django", 3),
                    ("Sams Teach Yourself Django in 24 Hours", 1),
                    (
                        "The Definitive Guide to Django: Web Development Done "
                        "Right",
                        2,
                    ),
                ],
                attrgetter("name", "num_authors"),
            )
def test_reverse_join_trimming(self):
    """
    Annotating over a reverse relation's field must keep the JOIN in the
    generated SQL (it cannot be trimmed away).
    """
    qs = Author.objects.annotate(Count("book_contact_set__contact"))
    self.assertIn(" JOIN ", str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
    """
    Regression test for #10870: Aggregates with joins ignore extra
    filters provided by setup_joins

    tests aggregations with generic reverse relations
    """
    django_book = Book.objects.get(name="Practical Django Projects")
    ItemTag.objects.create(
        object_id=django_book.id,
        tag="intermediate",
        content_type=ContentType.objects.get_for_model(django_book),
    )
    ItemTag.objects.create(
        object_id=django_book.id,
        tag="django",
        content_type=ContentType.objects.get_for_model(django_book),
    )
    # Assign a tag to model with same PK as the book above. If the JOIN
    # used in aggregation doesn't have content type as part of the
    # condition the annotation will also count the 'hi mom' tag for b.
    wmpk = WithManualPK.objects.create(id=django_book.pk)
    ItemTag.objects.create(
        object_id=wmpk.id,
        tag="hi mom",
        content_type=ContentType.objects.get_for_model(wmpk),
    )
    ai_book = Book.objects.get(
        name__startswith="Paradigms of Artificial Intelligence"
    )
    ItemTag.objects.create(
        object_id=ai_book.id,
        tag="intermediate",
        content_type=ContentType.objects.get_for_model(ai_book),
    )

    # Only the three ItemTags attached to Book rows must be counted.
    self.assertEqual(Book.objects.aggregate(Count("tags")), {"tags__count": 3})
    results = Book.objects.annotate(Count("tags")).order_by("-tags__count", "name")
    self.assertEqual(
        [(b.name, b.tags__count) for b in results],
        [
            ("Practical Django Projects", 2),
            (
                "Paradigms of Artificial Intelligence Programming: Case Studies in "
                "Common Lisp",
                1,
            ),
            ("Artificial Intelligence: A Modern Approach", 0),
            ("Python Web Development with Django", 0),
            ("Sams Teach Yourself Django in 24 Hours", 0),
            ("The Definitive Guide to Django: Web Development Done Right", 0),
        ],
    )
def test_negated_aggregation(self):
    """
    Excluding on an annotated aggregate behaves like a subquery-based
    exclusion, both for a repeated conjunction ``Q(...), Q(...)`` and for
    a disjunction ``Q(...) | Q(...)`` of the same condition.
    """
    # The original computed this identical expected list twice; both
    # excluded querysets are logically "authors without exactly 2 books".
    expected_results = [
        a.name
        for a in Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(
                book_cnt=2
            )
        ).order_by("name")
    ]
    qs = (
        Author.objects.annotate(book_cnt=Count("book"))
        .exclude(Q(book_cnt=2), Q(book_cnt=2))
        .order_by("name")
    )
    self.assertQuerySetEqual(qs, expected_results, lambda b: b.name)
    qs = (
        Author.objects.annotate(book_cnt=Count("book"))
        .exclude(Q(book_cnt=2) | Q(book_cnt=2))
        .order_by("name")
    )
    self.assertQuerySetEqual(qs, expected_results, lambda b: b.name)
def test_name_filters(self):
    """
    A filter mixing an aggregate condition and a plain field condition
    combined with OR returns the union of both matches.
    """
    qs = (
        Author.objects.annotate(Count("book"))
        .filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty"))
        .order_by("name")
    )
    self.assertQuerySetEqual(
        qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
    )
def test_name_expressions(self):
    # Aggregates are spotted correctly from F objects.
    # Note that Adrian's age is 34 in the fixtures, and he has one book
    # so both conditions match one author.
    qs = (
        Author.objects.annotate(Count("book"))
        .filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33))
        .order_by("name")
    )
    self.assertQuerySetEqual(
        qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
    )
def test_filter_aggregates_or_connector(self):
    """OR of a plain field condition and an aggregate condition."""
    expensive = Q(price__gt=50)
    multi_author = Q(authors__count__gt=1)
    qs = (
        Book.objects.annotate(Count("authors"))
        .filter(expensive | multi_author)
        .order_by("pk")
    )
    expected_pks = [self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk]
    self.assertQuerySetEqual(qs, expected_pks, attrgetter("pk"))
def test_filter_aggregates_negated_and_connector(self):
    """Negated AND of a plain field condition and an aggregate condition."""
    expensive = Q(price__gt=50)
    multi_author = Q(authors__count__gt=1)
    qs = (
        Book.objects.annotate(Count("authors"))
        .filter(~(expensive & multi_author))
        .order_by("pk")
    )
    expected_pks = [self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk]
    self.assertQuerySetEqual(qs, expected_pks, attrgetter("pk"))
def test_filter_aggregates_xor_connector(self):
    """XOR of a plain field condition and an aggregate condition."""
    expensive = Q(price__gt=50)
    multi_author = Q(authors__count__gt=1)
    qs = (
        Book.objects.annotate(Count("authors"))
        .filter(expensive ^ multi_author)
        .order_by("pk")
    )
    expected_pks = [self.b1.pk, self.b4.pk, self.b6.pk]
    self.assertQuerySetEqual(qs, expected_pks, attrgetter("pk"))
def test_filter_aggregates_negated_xor_connector(self):
    """Negated XOR of a plain field condition and an aggregate condition."""
    expensive = Q(price__gt=50)
    multi_author = Q(authors__count__gt=1)
    qs = (
        Book.objects.annotate(Count("authors"))
        .filter(~(expensive ^ multi_author))
        .order_by("pk")
    )
    expected_pks = [self.b2.pk, self.b3.pk, self.b5.pk]
    self.assertQuerySetEqual(qs, expected_pks, attrgetter("pk"))
def test_ticket_11293_q_immutable(self):
    """
    Splitting a q object to parts for where/having doesn't alter
    the original q-object.
    """
    q1 = Q(isbn="")
    q2 = Q(authors__count__gt=1)
    query = Book.objects.annotate(Count("authors"))
    query.filter(q1 | q2)
    # q2 must still hold exactly its one original child condition.
    self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
    """
    An F() object referring to related column works correctly in group by.
    """
    qs = Book.objects.annotate(account=Count("authors")).filter(
        account=F("publisher__num_awards")
    )
    self.assertQuerySetEqual(
        qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name
    )
def test_annotate_reserved_word(self):
    """
    Regression #18333 - Ensure annotated column name is properly quoted.
    """
    # "select" is an SQL reserved word used deliberately as the alias.
    vals = Book.objects.annotate(select=Count("authors__id")).aggregate(
        Sum("select"), Avg("select")
    )
    self.assertEqual(
        vals,
        {
            "select__sum": 10,
            "select__avg": Approximate(1.666, places=2),
        },
    )
def test_annotate_on_relation(self):
    """Annotations can mix an aggregate with an F() over a relation."""
    book = Book.objects.annotate(
        avg_price=Avg("price"), publisher_name=F("publisher__name")
    ).get(pk=self.b1.pk)
    self.assertEqual(book.avg_price, 30.00)
    self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
    # A query with an existing annotation aggregation on a relation should
    # succeed.
    qs = Book.objects.annotate(avg_price=Avg("price")).aggregate(
        publisher_awards=Sum("publisher__num_awards")
    )
    self.assertEqual(qs["publisher_awards"], 30)
def test_annotate_distinct_aggregate(self):
    # There are three books with rating of 4.0 and two of the books have
    # the same price. Hence, the distinct removes one rating of 4.0
    # from the results.
    vals1 = (
        Book.objects.values("rating", "price")
        .distinct()
        .aggregate(result=Sum("rating"))
    )
    # Equivalent to the full sum minus the one deduplicated 4.0 rating.
    vals2 = Book.objects.aggregate(result=Sum("rating") - Value(4.0))
    self.assertEqual(vals1, vals2)
def test_annotate_values_list_flat(self):
    """Find ages that are shared by at least two authors."""
    qs = (
        Author.objects.values_list("age", flat=True)
        .annotate(age_count=Count("age"))
        .filter(age_count__gt=1)
    )
    self.assertSequenceEqual(qs, [29])
def test_allow_distinct(self):
    """
    Aggregate subclasses reject distinct=True unless they opt in via the
    allow_distinct class attribute.
    """

    class MyAggregate(Aggregate):
        pass

    with self.assertRaisesMessage(TypeError, "MyAggregate does not allow distinct"):
        MyAggregate("foo", distinct=True)

    class DistinctAggregate(Aggregate):
        allow_distinct = True

    # Must not raise.
    DistinctAggregate("foo", distinct=True)
@skipUnlessDBFeature("allows_group_by_selected_pks")
def test_having_subquery_select(self):
    """A subquery condition ORed with an aggregate condition works."""
    authors = Author.objects.filter(pk=self.a1.pk)
    books = Book.objects.annotate(Count("authors")).filter(
        Q(authors__in=authors) | Q(authors__count__gt=2)
    )
    self.assertEqual(set(books), {self.b1, self.b4})
def test_aggregate_and_annotate_duplicate_columns(self):
    """
    Annotating an F() alias ("name") that collides with a column on a
    joined table still groups and aggregates correctly.
    """
    books = (
        Book.objects.values("isbn")
        .annotate(
            name=F("publisher__name"),
            num_authors=Count("authors"),
        )
        .order_by("isbn")
    )
    self.assertSequenceEqual(
        books,
        [
            {"isbn": "013235613", "name": "Prentice Hall", "num_authors": 3},
            {"isbn": "013790395", "name": "Prentice Hall", "num_authors": 2},
            {"isbn": "067232959", "name": "Sams", "num_authors": 1},
            {"isbn": "155860191", "name": "Morgan Kaufmann", "num_authors": 1},
            {"isbn": "159059725", "name": "Apress", "num_authors": 2},
            {"isbn": "159059996", "name": "Apress", "num_authors": 1},
        ],
    )
def test_aggregate_and_annotate_duplicate_columns_proxy(self):
    """Same duplicate-column annotation scenario through proxy models."""
    author = AuthorProxy.objects.latest("pk")
    recipe = RecipeProxy.objects.create(name="Dahl", author=author)
    recipe.tasters.add(author)
    recipes = RecipeProxy.objects.values("pk").annotate(
        # "name" deliberately shadows Recipe's own column with the
        # author's name.
        name=F("author__name"),
        num_tasters=Count("tasters"),
    )
    self.assertSequenceEqual(
        recipes,
        [{"pk": recipe.pk, "name": "Stuart Russell", "num_tasters": 1}],
    )
def test_aggregate_and_annotate_duplicate_columns_unmanaged(self):
    """Same duplicate-column annotation scenario through unmanaged models."""
    author = AuthorProxy.objects.latest("pk")
    recipe = RecipeProxy.objects.create(name="Dahl", author=author)
    recipe.tasters.add(author)
    recipes = RecipeUnmanaged.objects.values("pk").annotate(
        # "name" shadows the model column with an integer (author age).
        name=F("author__age"),
        num_tasters=Count("tasters"),
    )
    self.assertSequenceEqual(
        recipes,
        [{"pk": recipe.pk, "name": 46, "num_tasters": 1}],
    )
def test_aggregate_group_by_unseen_columns_unmanaged(self):
    """
    Grouping by an annotation whose alias collides with a real column that
    Django does not know about (unmanaged model) must use the annotation,
    not the hidden column.
    """
    author = AuthorProxy.objects.latest("pk")
    shadow_author = AuthorProxy.objects.create(name=author.name, age=author.age - 2)
    recipe = RecipeProxy.objects.create(name="Dahl", author=author)
    shadow_recipe = RecipeProxy.objects.create(
        name="Shadow Dahl",
        author=shadow_author,
    )
    recipe.tasters.add(shadow_author)
    shadow_recipe.tasters.add(author)
    # This selects how many tasters each author had according to a
    # calculated field "name". The table has a column "name" that Django is
    # unaware of, and is equal for the two authors. The grouping column
    # cannot be referenced by its name ("name"), as it'd return one result
    # which is incorrect.
    author_recipes = (
        AuthorUnmanaged.objects.annotate(
            name=Concat(
                Value("Writer at "),
                Cast(F("age"), output_field=CharField()),
            )
        )
        .values("name")  # Field used for grouping.
        .annotate(num_recipes=Count("recipeunmanaged"))
        .filter(num_recipes__gt=0)
        .values("num_recipes")  # Drop grouping column.
    )
    self.assertSequenceEqual(
        author_recipes,
        [{"num_recipes": 1}, {"num_recipes": 1}],
    )
| AggregationTests |
python | pdm-project__pdm | src/pdm/project/core.py | {
"start": 1988,
"end": 45321
} | class ____:
"""Core project class.
Args:
core: The core instance.
root_path: The root path of the project.
is_global: Whether the project is global.
global_config: The path to the global config file.
"""
# Name of the project manifest file, relative to the project root.
PYPROJECT_FILENAME = "pyproject.toml"
# Matches "dependencies" or "<group>-dependencies"; the optional group
# name is captured in group(1).
DEPENDENCIES_RE = re.compile(r"(?:(.+?)-)?dependencies")
def __init__(
    self,
    core: Core,
    root_path: str | Path | None,
    is_global: bool = False,
    global_config: str | Path | None = None,
) -> None:
    """Initialize the project, resolving its root and global config.

    When root_path is None, the project root is discovered from the
    current directory; if discovery fails and the global-project fallback
    is enabled (and we are not inside a conda base env), the project
    silently becomes the global project.
    """
    # Imported lazily to keep module import cheap.
    import platformdirs

    self._lockfile: Lockfile | None = None
    self._environment: BaseEnvironment | None = None
    self._python: PythonInfo | None = None
    self._cache_dir: Path | None = None
    self.core = core
    if global_config is None:
        global_config = platformdirs.user_config_path("pdm") / "config.toml"
    self.global_config = Config(Path(global_config), is_global=True)
    global_project = Path(self.global_config["global_project.path"]).expanduser()
    if root_path is None:
        root_path = find_project_root() if not is_global else global_project
    # Fallback: no project found on disk -> use the global project.
    if (
        not is_global
        and root_path is None
        and self.global_config["global_project.fallback"]
        and not is_conda_base()
    ):
        root_path = global_project
        is_global = True
        if self.global_config["global_project.fallback_verbose"]:
            self.core.ui.info("Project is not found, fallback to the global project")
    self.root: Path = Path(root_path or "").absolute()
    self.is_global = is_global
    # Writing the lock file can be disabled via PDM_FROZEN_LOCKFILE
    # (PDM_NO_LOCK is the older variable, used as the fallback default).
    self.enable_write_lockfile = os.getenv("PDM_FROZEN_LOCKFILE", os.getenv("PDM_NO_LOCK", "0")).lower() not in (
        "1",
        "true",
    )
    self.init_global_project()
def __repr__(self) -> str:
    """Debug representation showing the project root in POSIX form."""
    posix_root = self.root.as_posix()
    return "<Project '{}'>".format(posix_root)
@cached_property
def cache_dir(self) -> Path:
    """The cache directory from config, with ``~`` expanded."""
    configured = self.config.get("cache_dir", "")
    return Path(configured).expanduser()
@cached_property
def pyproject(self) -> PyProject:
    # Parsed pyproject.toml wrapper, created once per project instance.
    return PyProject(self.root / self.PYPROJECT_FILENAME, ui=self.core.ui)
@property
def lockfile(self) -> Lockfile:
    # Lazily resolve which lock file to use: prefer pylock.toml when the
    # "lock.format" config is pylock; otherwise fall back to an existing
    # pdm.lock; if neither exists, pick the name matching the format.
    if self._lockfile is None:
        enable_pylock = self.config["lock.format"] == "pylock"
        if (path := self.root / "pylock.toml").exists() and enable_pylock:
            self.set_lockfile(path)
        elif (path := self.root / "pdm.lock").exists():
            if enable_pylock:  # pragma: no cover
                self.core.ui.warn(
                    "`lock.format` is set to pylock but pylock.toml is not found, using pdm.lock instead. "
                    "You can generate pylock with `pdm export -f pylock -o pylock.toml`."
                )
            self.set_lockfile(path)
        else:
            file_path = "pylock.toml" if enable_pylock else "pdm.lock"
            self.set_lockfile(self.root / file_path)
    assert self._lockfile is not None
    return self._lockfile
def set_lockfile(self, path: str | Path) -> None:
    """Load the lock file at *path* and make it the active lockfile.

    FLAG_INHERIT_METADATA is dropped from the default strategies either
    when uv is used as the resolver or when the user disabled
    ``strategy.inherit_metadata`` in the config.
    """
    self._lockfile = load_lockfile(self, path)
    # The original performed the same discard in two separate branches;
    # the two conditions are merged here with identical effect.
    if self.config.get("use_uv") or not self.config["strategy.inherit_metadata"]:
        self._lockfile.default_strategies.discard(FLAG_INHERIT_METADATA)
@cached_property
def config(self) -> Mapping[str, Any]:
    """A read-only dict configuration"""
    import collections

    # Project-level settings shadow global ones via ChainMap lookup order.
    return collections.ChainMap(self.project_config, self.global_config)
@property
def scripts(self) -> dict[str, str | dict[str, str]]:
    # User-defined scripts from the pyproject settings table; a value is
    # either a command string or a table of script options.
    return self.pyproject.settings.get("scripts", {})
@cached_property
def project_config(self) -> Config:
    """Read-and-writable configuration dict for project settings"""
    config = Config(self.root / "pdm.toml")
    # TODO: for backward compatibility, remove this in the future
    # Merge values from the legacy .pdm.toml, except the saved python path
    # which now lives in the .pdm-python file.
    if self.root.joinpath(".pdm.toml").exists():
        legacy_config = Config(self.root / ".pdm.toml").self_data
        config.update((k, v) for k, v in legacy_config.items() if k != "python.path")
    return config
@property
def name(self) -> str:
    """The project name from the ``[project]`` metadata table."""
    metadata_name = self.pyproject.metadata.get("name")
    return cast(str, metadata_name)
@property
def python(self) -> PythonInfo:
    # Resolve the interpreter lazily and validate it before caching.
    if not self._python:
        python = self.resolve_interpreter()
        if python.major < 3:
            raise PdmUsageError(
                "Python 2.7 has reached EOL and PDM no longer supports it. "
                "Please upgrade your Python to 3.6 or later.",
            )
        # conda's base env is managed by conda itself; refusing here
        # avoids breaking it via the global project.
        if self.is_global and is_conda_base_python(python.path):  # pragma: no cover
            raise PdmUsageError("Can't use global project in conda base environment since it is managed by conda")
        self._python = python
    return self._python
@python.setter
def python(self, value: PythonInfo) -> None:
    # Cache the interpreter and persist its path for future invocations.
    self._python = value
    self._saved_python = value.path.as_posix()
@property
def _saved_python(self) -> str | None:
    # Resolution order: PDM_PYTHON env var, the .pdm-python file, then the
    # legacy .pdm.toml [python] table.
    if os.getenv("PDM_PYTHON"):
        return os.getenv("PDM_PYTHON")
    with contextlib.suppress(FileNotFoundError):
        return self.root.joinpath(".pdm-python").read_text("utf-8").strip()
    with contextlib.suppress(FileNotFoundError):
        # TODO: remove this in the future
        with self.root.joinpath(".pdm.toml").open("rb") as fp:
            data = tomllib.load(fp)
            if data.get("python", {}).get("path"):
                return data["python"]["path"]
    return None
@_saved_python.setter
def _saved_python(self, value: str | None) -> None:
    # Persist the interpreter path in .pdm-python; None removes the file.
    self.root.mkdir(parents=True, exist_ok=True)
    python_file = self.root.joinpath(".pdm-python")
    if value is None:
        with contextlib.suppress(FileNotFoundError):
            python_file.unlink()
        return
    python_file.write_text(value, "utf-8")
def resolve_interpreter(self) -> PythonInfo:
    """Get the Python interpreter path.

    Resolution order: the saved interpreter (PDM_PYTHON / .pdm-python),
    an activated virtualenv, a virtualenv associated with the project,
    a freshly created virtualenv, and finally any system interpreter
    matching requires-python (PEP 582 / __pypackages__ mode).
    Raises NoPythonVersion if nothing satisfies the requirement.
    """
    from pdm.cli.commands.venv.utils import iter_venvs
    from pdm.models.venv import get_venv_python

    def match_version(python: PythonInfo) -> bool:
        return python.valid and self.python_requires.contains(python.version, True)

    def note(message: str) -> None:
        # Informational messages are suppressed for the global project.
        if not self.is_global:
            self.core.ui.info(message)

    def is_active_venv(python: PythonInfo) -> bool:
        if not (venv := os.getenv("VIRTUAL_ENV", os.getenv("CONDA_PREFIX"))):
            return False
        return is_path_relative_to(python.executable, venv)

    config = self.config
    saved_path = self._saved_python
    if saved_path and not ensure_boolean(os.getenv("PDM_IGNORE_SAVED_PYTHON")):
        python = PythonInfo.from_path(saved_path)
        if match_version(python):
            return python
        elif not python.valid:
            note("The saved Python interpreter does not exist or broken. Trying to find another one.")
        else:
            note(
                "The saved Python interpreter doesn't match the project's requirement. Trying to find another one."
            )
        self._saved_python = None  # Clear the saved path if it doesn't match
    if config.get("python.use_venv") and not self.is_global:
        # Resolve virtual environments from env-vars
        ignore_active_venv = ensure_boolean(os.getenv("PDM_IGNORE_ACTIVE_VENV"))
        venv_in_env = os.getenv("VIRTUAL_ENV", os.getenv("CONDA_PREFIX"))
        # We don't auto reuse conda's base env since it may cause breakage when removing packages.
        if not ignore_active_venv and venv_in_env and not is_conda_base():
            python = PythonInfo.from_path(get_venv_python(Path(venv_in_env)))
            if match_version(python):
                note(
                    f"Inside an active virtualenv [success]{venv_in_env}[/], reusing it.\n"
                    "Set env var [success]PDM_IGNORE_ACTIVE_VENV[/] to ignore it."
                )
                return python
        # otherwise, get a venv associated with the project
        for _, venv in iter_venvs(self):
            python = PythonInfo.from_path(venv.interpreter)
            if match_version(python) and not (ignore_active_venv and is_active_venv(python)):
                note(f"Virtualenv [success]{venv.root}[/] is reused.")
                self.python = python
                return python
        # No venv found; only create one when the project doesn't use
        # __pypackages__ (PEP 582) already.
        if not self.root.joinpath("__pypackages__").exists():
            self.core.ui.warn(
                f"Project requires a python version of {self.python_requires}, "
                f"The virtualenv is being created for you as it cannot be matched to the right version."
            )
            note("python.use_venv is on, creating a virtualenv for this project...")
            venv_path = self._create_virtualenv()
            self.python = PythonInfo.from_path(get_venv_python(venv_path))
            return self.python
    if self.root.joinpath("__pypackages__").exists() or not config["python.use_venv"] or self.is_global:
        for py_version in self.iter_interpreters(
            filter_func=match_version, respect_version_file=config["python.use_python_version"]
        ):
            note("[success]__pypackages__[/] is detected, using the PEP 582 mode")
            self.python = py_version
            return py_version
    raise NoPythonVersion(f"No Python that satisfies {self.python_requires} is found on the system.")
def get_environment(self) -> BaseEnvironment:
    """Get the environment selected by this project.

    A global project always gets a ``PythonEnvironment`` pinned to the
    exact interpreter version; otherwise a venv-backed
    ``PythonEnvironment`` is used when enabled and available, falling
    back to ``PythonLocalEnvironment`` (__pypackages__).
    """
    # Fix: the docstring was previously placed after this import, making
    # it a no-op string statement rather than the function's docstring.
    from pdm.environments import PythonEnvironment, PythonLocalEnvironment

    if self.is_global:
        env = PythonEnvironment(self)
        # Rewrite global project's python requires to be
        # compatible with the exact version
        env.python_requires = PySpecSet(f"=={self.python.version}")
        return env
    return (
        PythonEnvironment(self)
        if self.config["python.use_venv"] and self.python.get_venv() is not None
        else PythonLocalEnvironment(self)
    )
def _create_virtualenv(self, python: str | None = None) -> Path:
    """Create a virtualenv for the project and return its path.

    The backend comes from the ``venv.backend`` config; when it is
    "virtualenv" and uv mode is enabled, the uv backend is used instead.
    """
    from pdm.cli.commands.venv.backends import BACKENDS

    backend: str = self.config["venv.backend"]
    if backend == "virtualenv" and self.config["use_uv"]:
        backend = "uv"
    venv_backend = BACKENDS[backend](self, python)
    path = venv_backend.create(
        force=True,
        in_project=self.config["venv.in_project"],
        prompt=self.config["venv.prompt"],
        with_pip=self.config["venv.with_pip"],
    )
    self.core.ui.echo(f"Virtualenv is created successfully at [success]{path}[/]", err=True)
    return path
@property
def environment(self) -> BaseEnvironment:
    """The project environment, created on first access and cached."""
    env = self._environment
    if not env:
        env = self._environment = self.get_environment()
    return env
@environment.setter
def environment(self, value: BaseEnvironment | None) -> None:
    # Setting None invalidates the cache so the next access recreates it.
    self._environment = value
@property
def python_requires(self) -> PySpecSet:
    # requires-python from [project]; an empty string means "any version".
    return PySpecSet(self.pyproject.metadata.get("requires-python", ""))
def get_dependencies(
    self, group: str | None = None, all_dependencies: dict[str, list[Requirement]] | None = None
) -> Sequence[Requirement]:
    """Return the requirements of the given group (default: "default").

    Raises ProjectError when the group is not defined anywhere.
    """
    group = normalize_name(group or "default")
    if all_dependencies is None:
        all_dependencies = self._resolve_dependencies([group])
    if group not in all_dependencies:
        raise ProjectError(f"Dependency group {group} does not exist")
    return CompatibleSequence(all_dependencies[group])
def iter_groups(self) -> Iterable[str]:
    """Return all known dependency group names, normalized.

    Collects "default", [project.optional-dependencies],
    [dependency-groups] and the tool-specific dev-dependencies table.
    """
    group_names: set[str] = {"default"}
    optional = self.pyproject.metadata.get("optional-dependencies")
    if optional:
        group_names.update(optional.keys())
    group_names.update(self.pyproject._data.get("dependency-groups", {}).keys())
    group_names.update(self.pyproject.settings.get("dev-dependencies", {}).keys())
    return {normalize_name(name) for name in group_names}
def _resolve_dependencies(
    self, requested_groups: list[str] | None = None, include_referred: bool = True
) -> dict[str, list[Requirement]]:
    """Resolve dependencies for the given groups, and return a list of requirements for each group.
    The .groups attribute will be set to all that refers this requirement directly or indirectly.
    If `include_referred` is True, all self-references and `include-group` will be expanded to
    corresponding requirements. Otherwise, each group only contains explicitly defined requirements.
    """

    def _get_dependencies(group: str) -> tuple[list[Requirement], set[str]]:
        # Returns (parsed requirements, referenced group names) for one
        # group. References come from self-extras ("proj[extra]") or,
        # for dev groups, {"include-group": ...} items.
        in_metadata = group in metadata_dependencies
        collected_deps: list[str] = []
        referred: set[str] = set()
        deps = metadata_dependencies.get(group, []) if in_metadata else dev_dependencies[group]
        for item in deps:
            if isinstance(item, str):
                try:
                    name, extras = strip_extras(item)
                except AssertionError:
                    pass
                else:
                    # A self-reference ("<project>[extraA,extraB]") pulls
                    # in the named extras instead of a real requirement.
                    if normalize_name(name) == project_name:
                        if extras:
                            allowed = (
                                set(metadata_dependencies)
                                if in_metadata
                                else {*metadata_dependencies, *dev_dependencies}
                            )
                            extras = tuple(normalize_name(extra) for extra in extras)
                            not_allowed = set(extras) - allowed
                            if not_allowed:
                                raise ProjectError(
                                    f"Optional dependency group '{group}' cannot "
                                    f"include non-existing extras: [{','.join(not_allowed)}]"
                                )
                            referred.update(extras)
                        continue
                collected_deps.append(item)
            elif not in_metadata and isinstance(item, dict):
                if tuple(item.keys()) != ("include-group",):
                    raise ProjectError(f"Invalid dependency group item: {item}")
                include_group = normalize_name(item["include-group"])
                if include_group not in dev_dependencies:
                    raise ProjectError(f"Missing group '{include_group}' in `include-group`")
                referred.add(include_group)
            else:
                raise ProjectError(f"Invalid dependency in group {group}: {item}")
        result: list[Requirement] = []
        # Relative paths in requirements are resolved against the project
        # root.
        with cd(self.root):
            for line in collected_deps:
                if line.startswith("-e ") and in_metadata:
                    self.core.ui.warn(
                        f"Skipping editable dependency [b]{line}[/] in the"
                        r" [success]\[project][/] table. Please move it to the "
                        r"[success]\[tool.pdm.dev-dependencies][/] table"
                    )
                    continue
                req = parse_line(line)
                req.groups = [group]
                # make editable packages behind normal ones to override correctly.
                result.append(req)
        return result, referred

    if requested_groups is None:
        requested_groups = list(self.iter_groups())
    requested_groups = [normalize_name(g) for g in requested_groups]
    referred_groups: dict[str, set[str]] = {}
    metadata_dependencies = {
        normalize_name(k): v for k, v in self.pyproject.metadata.get("optional-dependencies", {}).items()
    }
    if "default" in metadata_dependencies:  # pragma: no cover
        raise ProjectError(
            "'default' is reserved by the main dependencies and is not allowed in optional dependencies."
        )
    metadata_dependencies["default"] = self.pyproject.metadata.get("dependencies", [])
    dev_dependencies = self.pyproject.dev_dependencies
    if "default" in dev_dependencies:  # pragma: no cover
        raise ProjectError(
            "'default' is reserved by the main dependencies and is not allowed in dependency groups."
        )
    group_deps: dict[str, list[Requirement]] = {}
    project_name = normalize_name(self.name) if self.name else None
    for group in requested_groups:
        deps, referred = _get_dependencies(group)
        group_deps[group] = deps
        if referred:
            referred_groups[group] = referred
    extra_deps: dict[str, list[Requirement]] = {}
    # Fixed-point loop: expand group references until none are pending; a
    # pass with no progress means the references form a cycle.
    while referred_groups:
        updated = False
        ref_iter = list(referred_groups.items())
        for group, referred in ref_iter:
            for ref in list(referred):
                if ref not in requested_groups:
                    deps, r = _get_dependencies(ref)
                    group_deps[ref] = deps
                    if r:
                        referred_groups[ref] = r
                        # append to the ref_iter to process later
                        ref_iter.append((ref, r))
                    requested_groups.append(ref)
                if ref in referred_groups:  # not resolved yet
                    continue
                extra_deps.setdefault(group, []).extend(group_deps[ref])
                # Record the including group on each pulled-in requirement.
                for req in itertools.chain(group_deps[ref], extra_deps.get(ref, [])):
                    if group not in req.groups:
                        req.groups.append(group)
                referred.remove(ref)
                updated = True
            if not referred:
                referred_groups.pop(group)
        if not updated:
            raise ProjectError(f"Cyclic dependency group include detected: {set(referred_groups)}")
    if include_referred:
        for group, deps in extra_deps.items():
            group_deps[group].extend(deps)
    return group_deps
@property
def all_dependencies(self) -> dict[str, Sequence[Requirement]]:
    # Explicitly declared requirements per group; include-group and
    # self-extra references are NOT expanded (include_referred=False).
    return {k: CompatibleSequence(v) for k, v in self._resolve_dependencies(include_referred=False).items()}
@property
def default_source(self) -> RepositoryConfig:
    """Get the default source from the pypi setting"""
    cfg = self.config
    return RepositoryConfig(
        config_prefix="pypi",
        name="pypi",
        url=cfg["pypi.url"],
        verify_ssl=cfg["pypi.verify_ssl"],
        username=cfg.get("pypi.username"),
        password=cfg.get("pypi.password"),
        ca_certs=cfg.get("pypi.ca_certs"),
        client_cert=cfg.get("pypi.client_cert"),
        client_key=cfg.get("pypi.client_key"),
    )
@property
def sources(self) -> list[RepositoryConfig]:
    # Stored (config-file) indexes are excluded when
    # pypi.ignore_stored_index is set.
    return self.get_sources(include_stored=not self.config.get("pypi.ignore_stored_index", False))
def get_sources(self, expand_env: bool = True, include_stored: bool = False) -> list[RepositoryConfig]:
    """Collect package sources, pyproject entries taking precedence.

    Merge order: [tool.pdm.source] tables, then project config, then
    global config (later ones only fill in missing attributes). Sources
    without a URL are dropped; env vars in URLs are expanded when
    *expand_env* is true.
    """
    result: dict[str, RepositoryConfig] = {}
    for source in self.pyproject.settings.get("source", []):
        result[source["name"]] = RepositoryConfig(**source, config_prefix="pypi")

    def merge_sources(other_sources: Iterable[RepositoryConfig]) -> None:
        for source in other_sources:
            name = source.name
            if name in result:
                result[name].passive_update(source)
            elif include_stored:
                result[name] = source

    merge_sources(self.project_config.iter_sources())
    merge_sources(self.global_config.iter_sources())
    if "pypi" in result:
        result["pypi"].passive_update(self.default_source)
    elif include_stored:
        # put pypi source at the beginning
        result = {"pypi": self.default_source, **result}
    sources: list[RepositoryConfig] = []
    for source in result.values():
        if not source.url:
            continue
        if expand_env:
            source.url = DEFAULT_BACKEND(self.root).expand_line(expand_env_vars_in_auth(source.url))
        sources.append(source)
    return sources
def get_repository(
    self,
    cls: type[BaseRepository] | None = None,
    ignore_compatibility: bool | NotSetType = NotSet,
    env_spec: EnvSpec | None = None,
) -> BaseRepository:
    """Get the repository object"""
    if cls is None:
        cls = self.core.repository_class
    sources = self.sources or []
    # Repository classes may use either the new env_spec parameter or the
    # legacy ignore_compatibility one; pick based on the constructor.
    params = get_class_init_params(cls)
    if "env_spec" in params:
        return cls(sources, self.environment, env_spec=env_spec)
    else:
        return cls(sources, self.environment, ignore_compatibility=ignore_compatibility)
def get_locked_repository(self, env_spec: EnvSpec | None = None) -> LockedRepository:
    """Build a LockedRepository from the lock file (empty if unreadable)."""
    try:
        lockfile = self.lockfile.open_for_read()
    except ProjectError:
        # Missing or invalid lock file: act as an empty repository.
        lockfile = {}
    return LockedRepository(lockfile, self.sources, self.environment, env_spec=env_spec)
def split_extras_groups(self, all_groups: list[str]) -> tuple[list[str], list[str]]:
    """Partition *all_groups* into (extras, non-extras).

    A group counts as an extra when its name appears (normalized) in
    the project's optional-dependencies table. Input order is kept.
    """
    optional_names = {
        normalize_name(name)
        for name in self.pyproject.metadata.get("optional-dependencies", [])
    }
    extras = [g for g in all_groups if g in optional_names]
    groups = [g for g in all_groups if g not in optional_names]
    return extras, groups
@property
def locked_repository(self) -> LockedRepository:
    # Deprecated alias kept for backward compatibility.
    deprecation_warning("Project.locked_repository is deprecated, use Project.get_locked_repository() instead", 2)
    return self.get_locked_repository()
def get_provider(
    self,
    strategy: str = "all",
    tracked_names: Iterable[str] | None = None,
    for_install: bool = False,
    ignore_compatibility: bool | NotSetType = NotSet,
    direct_minimal_versions: bool = False,
    env_spec: EnvSpec | None = None,
    locked_repository: LockedRepository | None = None,
) -> BaseProvider:
    """Build a provider class for resolver.

    :param strategy: the resolve strategy
    :param tracked_names: the names of packages that needs to update
    :param for_install: if the provider is for install
    :param ignore_compatibility: if the provider should ignore the compatibility when evaluating candidates
    :param direct_minimal_versions: if the provider should prefer minimal versions instead of latest
    :returns: The provider object
    """
    import inspect

    from pdm.resolver.providers import get_provider

    if env_spec is None:
        env_spec = (
            self.environment.allow_all_spec if ignore_compatibility in (True, NotSet) else self.environment.spec
        )
    # Signature inspection keeps compatibility with repository/provider
    # classes that predate the env_spec / locked_repository parameters.
    repo_params = inspect.signature(self.get_repository).parameters
    if "env_spec" in repo_params:
        repository = self.get_repository(env_spec=env_spec)
    else:  # pragma: no cover
        repository = self.get_repository(ignore_compatibility=ignore_compatibility)
    if locked_repository is None:
        try:
            locked_repository = self.get_locked_repository(env_spec)
        except Exception:  # pragma: no cover
            if strategy != "all":
                self.core.ui.warn("Unable to reuse the lock file as it is not compatible with PDM")
    provider_class = get_provider(strategy)
    params: dict[str, Any] = {}
    if strategy != "all":
        params["tracked_names"] = [strip_extras(name)[0] for name in tracked_names or ()]
    if "locked_repository" in inspect.signature(provider_class).parameters:
        params["locked_repository"] = locked_repository
    else:
        locked_candidates: dict[str, list[Candidate]] = (
            {} if locked_repository is None else locked_repository.all_candidates
        )
        params["locked_candidates"] = locked_candidates
    return provider_class(repository=repository, direct_minimal_versions=direct_minimal_versions, **params)
def get_reporter(
    self, requirements: list[Requirement], tracked_names: Iterable[str] | None = None
) -> RichLockReporter:  # pragma: no cover
    """Return the reporter object to construct a resolver.

    :param requirements: requirements to resolve
    :param tracked_names: the names of packages that needs to update
    :param spinner: optional spinner object
    :returns: a reporter
    """
    from pdm.resolver.reporters import RichLockReporter

    # tracked_names is currently unused here; kept for interface
    # compatibility with callers.
    return RichLockReporter(requirements, self.core.ui)
def write_lockfile(
    self, toml_data: Any = None, show_message: bool = True, write: bool = True, **_kwds: Any
) -> None:
    """Write the lock file to disk."""
    if _kwds:  # pragma: no cover
        deprecation_warning("Extra arguments have been moved to `format_lockfile` function", stacklevel=2)
    if toml_data is not None:  # pragma: no cover
        deprecation_warning(
            "Passing toml_data to write_lockfile is deprecated, please use `format_lockfile` instead", stacklevel=2
        )
        self.lockfile.set_data(toml_data)
    # Record the current pyproject content hash so staleness can be
    # detected later (see is_lockfile_hash_match).
    self.lockfile.update_hash(self.pyproject.content_hash("sha256"))
    # enable_write_lockfile honors the PDM_FROZEN_LOCKFILE env var.
    if write and self.enable_write_lockfile:
        self.lockfile.write(show_message)
def make_self_candidate(self, editable: bool = True) -> Candidate:
    """Build a resolvable Candidate for this project itself (file:// link)."""
    from unearth import Link

    from pdm.models.candidates import Candidate

    req = parse_requirement(self.root.as_uri(), editable)
    assert self.name
    req.name = self.name
    can = Candidate(req, name=self.name, link=Link.from_path(self.root))
    # Accessing .metadata eagerly prepares/builds the candidate.
    can.prepare(self.environment).metadata
    return can
def is_lockfile_hash_match(self) -> bool:
    """Whether the lock file's stored hash matches the current pyproject content."""
    algo, stored_hash = self.lockfile.hash
    if not stored_hash:
        # No recorded hash means the lock file can't be trusted as current.
        return False
    return self.pyproject.content_hash(algo) == stored_hash
def use_pyproject_dependencies(
self, group: str, dev: bool = False
) -> tuple[list[str], Callable[[list[str]], None]]:
"""Get the dependencies array and setter in the pyproject.toml
Return a tuple of two elements, the first is the dependencies array,
and the second value is a callable to set the dependencies array back.
"""
from pdm.formats.base import make_array
def update_dev_dependencies(deps: list[str]) -> None:
from tomlkit.container import OutOfOrderTableProxy
dependency_groups: list[str | dict[str, str]] = tomlkit.array().multiline(True)
dev_dependencies: list[str] = tomlkit.array().multiline(True)
for dep in deps:
if isinstance(dep, str) and dep.startswith("-e"):
dev_dependencies.append(dep)
else:
dependency_groups.append(dep)
if dependency_groups:
self.pyproject.dependency_groups[group] = dependency_groups
else:
self.pyproject.dependency_groups.pop(group, None)
if dev_dependencies:
settings.setdefault("dev-dependencies", {})[group] = dev_dependencies
else:
settings.setdefault("dev-dependencies", {}).pop(group, None)
if isinstance(self.pyproject._data["tool"], OutOfOrderTableProxy):
# In case of a separate table, we have to remove and re-add it to make the write correct.
# This may change the order of tables in the TOML file, but it's the best we can do.
# see bug pdm-project/pdm#2056 for details
del self.pyproject._data["tool"]["pdm"]
self.pyproject._data["tool"]["pdm"] = settings
metadata, settings = self.pyproject.metadata, self.pyproject.settings
if group == "default":
return metadata.get("dependencies", tomlkit.array()), lambda x: metadata.__setitem__("dependencies", x)
dev_dependencies = deepcopy(self.pyproject._data.get("dependency-groups", {}))
for dev_group, items in self.pyproject.settings.get("dev-dependencies", {}).items():
dev_dependencies.setdefault(dev_group, []).extend(items)
deps_setter = [
(
metadata.get("optional-dependencies", {}),
lambda x: metadata.setdefault("optional-dependencies", {}).__setitem__(group, x)
if x
else metadata.setdefault("optional-dependencies", {}).pop(group, None),
),
(dev_dependencies, update_dev_dependencies),
]
normalized_group = normalize_name(group)
for deps, setter in deps_setter:
normalized_groups = {normalize_name(g) for g in deps}
if group in deps:
return make_array(deps[group], True), setter
if normalized_group in normalized_groups:
raise PdmUsageError(f"Group {group} already exists in another non-normalized form")
# If not found, return an empty list and a setter to add the group
return tomlkit.array().multiline(True), deps_setter[int(dev)][1]
def add_dependencies(
self,
requirements: Iterable[str | Requirement],
to_group: str = "default",
dev: bool = False,
show_message: bool = True,
write: bool = True,
) -> list[Requirement]:
"""Add requirements to the given group, and return the requirements of that group."""
if isinstance(requirements, Mapping): # pragma: no cover
deprecation_warning(
"Passing a requirements map to add_dependencies is deprecated, please pass an iterable", stacklevel=2
)
requirements = requirements.values()
self.pyproject.open_for_write()
deps, setter = self.use_pyproject_dependencies(to_group, dev)
updated_indices: set[int] = set()
with cd(self.root):
parsed_deps = [(parse_line(dep) if isinstance(dep, str) else None) for dep in deps]
for req in requirements:
if isinstance(req, str):
req = parse_line(req)
matched_index = next(
(
i
for i, r in enumerate(deps)
if isinstance(r, str) and req.matches(r) and i not in updated_indices
),
None,
)
dep = req.as_line()
if matched_index is None:
updated_indices.add(len(deps))
deps.append(dep)
parsed_deps.append(req)
else:
deps[matched_index] = dep
parsed_deps[matched_index] = req
updated_indices.add(matched_index)
setter(deps)
if write:
self.pyproject.write(show_message)
for r in parsed_deps:
if r is not None:
r.groups = [to_group]
return [r for r in parsed_deps if r is not None]
def init_global_project(self) -> None:
if not self.is_global or not self.pyproject.empty():
return
self.root.mkdir(parents=True, exist_ok=True)
self.pyproject.set_data({"project": {"dependencies": ["pip", "setuptools", "wheel"]}})
self.pyproject.write()
@property
def backend(self) -> BuildBackend:
return get_backend_by_spec(self.pyproject.build_system)(self.root)
def cache(self, name: str) -> Path:
path = self.cache_dir / name
try:
path.mkdir(parents=True, exist_ok=True)
except OSError:
# The path could be not accessible
pass
return path
def make_wheel_cache(self) -> WheelCache:
from pdm.models.caches import get_wheel_cache
return get_wheel_cache(self.cache("wheels"))
@property
def package_cache(self) -> PackageCache:
return PackageCache(self.cache("packages"))
def make_candidate_info_cache(self) -> CandidateInfoCache:
from pdm.models.caches import CandidateInfoCache, EmptyCandidateInfoCache
python_hash = hashlib.sha1(str(self.environment.python_requires).encode()).hexdigest()
file_name = f"package_meta_{python_hash}.json"
return (
CandidateInfoCache(self.cache("metadata") / file_name)
if self.core.state.enable_cache
else EmptyCandidateInfoCache(self.cache("metadata") / file_name)
)
def make_hash_cache(self) -> HashCache:
from pdm.models.caches import EmptyHashCache, HashCache
return HashCache(self.cache("hashes")) if self.core.state.enable_cache else EmptyHashCache(self.cache("hashes"))
def iter_interpreters(
self,
python_spec: str | None = None,
search_venv: bool | None = None,
filter_func: Callable[[PythonInfo], bool] | None = None,
respect_version_file: bool = True,
) -> Iterable[PythonInfo]:
"""Iterate over all interpreters that matches the given specifier.
And optionally install the interpreter if not found.
"""
from packaging.version import InvalidVersion
from pdm.cli.commands.python import InstallCommand
def read_version_from_version_file(python_version_file: Path) -> str | None:
content = python_version_file.read_text().strip()
content_lines = [cl for cl in content.splitlines() if not cl.lstrip().startswith("#")]
return content_lines[0] if len(content_lines) == 1 else None
version_file = self.root.joinpath(".python-version")
found = False
if respect_version_file and not python_spec and (os.getenv("PDM_PYTHON_VERSION") or version_file.exists()):
requested = os.getenv("PDM_PYTHON_VERSION") or read_version_from_version_file(version_file)
if requested is not None and requested not in self.python_requires:
self.core.ui.warn(".python-version is found but the version is not in requires-python, ignored.")
elif requested is not None:
python_spec = requested
for interpreter in self.find_interpreters(python_spec, search_venv):
if filter_func is None or filter_func(interpreter):
found = True
yield interpreter
if found or self.is_global:
return
if not python_spec: # handle both empty string and None
# Get the best match meeting the requires-python
best_match = self.get_best_matching_cpython_version()
if best_match is None:
return
python_spec = str(best_match)
else:
try:
if python_spec not in self.python_requires:
return
except InvalidVersion:
return
try:
# otherwise if no interpreter is found, try to install it
installed = InstallCommand.install_python(self, python_spec)
except Exception as e:
self.core.ui.error(f"Failed to install Python {python_spec}: {e}")
return
else:
if filter_func is None or filter_func(installed):
yield installed
def find_interpreters(
self, python_spec: str | None = None, search_venv: bool | None = None
) -> Iterable[PythonInfo]:
"""Return an iterable of interpreter paths that matches the given specifier,
which can be:
1. a version specifier like 3.7
2. an absolute path
3. a short name like python3
4. None that returns all possible interpreters
"""
config = self.config
python: str | Path | None = None
finder_arg: str | None = None
if not python_spec:
if config.get("python.use_pyenv", True) and os.path.exists(PYENV_ROOT):
pyenv_shim = os.path.join(PYENV_ROOT, "shims", "python3")
if os.name == "nt":
pyenv_shim += ".bat"
if os.path.exists(pyenv_shim):
yield PythonInfo.from_path(pyenv_shim)
elif os.path.exists(pyenv_shim.replace("python3", "python")):
yield PythonInfo.from_path(pyenv_shim.replace("python3", "python"))
python = shutil.which("python") or shutil.which("python3")
if python:
yield PythonInfo.from_path(python)
else:
if not all(c.isdigit() for c in python_spec.split(".")):
path = Path(python_spec)
if path.exists():
python = find_python_in_path(python_spec)
if python:
yield PythonInfo.from_path(python)
return
if len(path.parts) == 1: # only check for spec with only one part
python = shutil.which(python_spec)
if python:
yield PythonInfo.from_path(python)
return
finder_arg = python_spec
if search_venv is None:
search_venv = cast(bool, config["python.use_venv"])
finder = self._get_python_finder(search_venv)
for entry in finder.find_all(finder_arg, allow_prereleases=True):
yield PythonInfo(entry)
if not python_spec:
# Lastly, return the host Python as well
this_python = getattr(sys, "_base_executable", sys.executable)
yield PythonInfo.from_path(this_python)
def _get_python_finder(self, search_venv: bool = True) -> Finder:
from findpython import Finder
from pdm.cli.commands.venv.utils import VenvProvider
providers: list[str] = self.config["python.providers"]
venv_pos = -1
if not providers:
venv_pos = 0
elif "venv" in providers:
venv_pos = providers.index("venv")
providers.remove("venv")
old_rye_root = os.getenv("RYE_PY_ROOT")
os.environ["RYE_PY_ROOT"] = os.path.expanduser(self.config["python.install_root"])
try:
finder = Finder(resolve_symlinks=True, selected_providers=providers or None)
finally:
if old_rye_root: # pragma: no cover
os.environ["RYE_PY_ROOT"] = old_rye_root
else:
del os.environ["RYE_PY_ROOT"]
if search_venv and venv_pos >= 0:
finder.add_provider(VenvProvider(self), venv_pos)
return finder
@property
def is_distribution(self) -> bool:
if not self.name:
return False
settings = self.pyproject.settings
if "package-type" in settings:
return settings["package-type"] == "library"
elif "distribution" in settings:
return cast(bool, settings["distribution"])
else:
return True
def get_setting(self, key: str) -> Any:
"""
Get a setting from its dotted key (without the `tool.pdm` prefix).
Returns `None` if the key does not exists.
"""
try:
return reduce(operator.getitem, key.split("."), self.pyproject.settings)
except KeyError:
return None
def env_or_setting(self, var: str, key: str) -> Any:
"""
Get a value from environment variable and fallback on a given setting.
Returns `None` if both the environment variable and the key does not exists.
"""
return os.getenv(var.upper()) or self.get_setting(key)
def get_best_matching_cpython_version(
self, use_minimum: bool | None = False, freethreaded: bool = False
) -> PythonVersion | None:
"""
Returns the best matching CPython version that fits requires-python, this platform and arch.
If no best match could be found, return None.
Default for best match strategy is "highest" possible interpreter version. If "minimum" shall be used,
set `use_minimum` to True.
"""
def get_version(version: PythonVersion) -> str:
return f"{version.major}.{version.minor}.{version.micro}"
all_matches = get_all_installable_python_versions(build_dir=False)
filtered_matches = [
v
for v in all_matches
if v.freethreaded == freethreaded
and get_version(v) in self.python_requires
and v.implementation.lower() == "cpython"
]
if filtered_matches:
if use_minimum:
return min(filtered_matches, key=lambda v: (v.major, v.minor, v.micro))
return max(filtered_matches, key=lambda v: (v.major, v.minor, v.micro))
return None
@property
def lock_targets(self) -> list[EnvSpec]:
return [self.environment.allow_all_spec]
def get_resolver(self, allow_uv: bool = True) -> type[Resolver]:
"""Get the resolver class to use for the project."""
from pdm.resolver.resolvelib import RLResolver
from pdm.resolver.uv import UvResolver
if allow_uv and self.config.get("use_uv"):
return UvResolver
else:
return RLResolver
def get_synchronizer(self, quiet: bool = False, allow_uv: bool = True) -> type[BaseSynchronizer]:
"""Get the synchronizer class to use for the project."""
from pdm.installers import BaseSynchronizer, Synchronizer, UvSynchronizer
from pdm.installers.uv import QuietUvSynchronizer
if allow_uv and self.config.get("use_uv"):
return QuietUvSynchronizer if quiet else UvSynchronizer
if quiet:
return BaseSynchronizer
return getattr(self.core, "synchronizer_class", Synchronizer)
| Project |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 132408,
"end": 159525
} | class ____(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# lambda_name string the internal name of a lambda 'function'
# decorators [DecoratorNode] list of decorators
# args [CArgDeclNode] formal arguments
# doc EncodedString or None
# body StatListNode
# return_type_annotation
# ExprNode or None the Py3 return type annotation
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# fused_py_func DefNode The original fused cpdef DefNode
# (in case this is a specialization)
# specialized_cpdefs [DefNode] list of specialized cpdef DefNodes
# py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
#
# decorator_indirection IndirectionNode Used to remove __Pyx_Method_ClassMethod for fused functions
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"]
outer_attrs = ["decorators", "return_type_annotation"]
is_staticmethod = False
is_classmethod = False
lambda_name = None
reqd_kw_flags_cname = "0"
is_wrapper = 0
no_assignment_synthesis = 0
decorators = None
return_type_annotation = None
entry = None
acquire_gil = 0
self_in_stararg = 0
py_cfunc_node = None
requires_classobj = False
defaults_struct = None # Dynamic kwrds structure name
doc = None
fused_py_func = False
specialized_cpdefs = None
py_wrapper = None
py_wrapper_required = True
func_cname = None
defaults_getter = None
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
# Prepare signature information for code objects.
p = k = rk = r = 0
for arg in self.args:
if arg.pos_only:
p += 1
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
self.num_posonly_args = p
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
def as_cfunction(self, cfunc=None, scope=None, overridable=True, returns=None, except_val=None, has_explicit_exc_clause=False,
modifiers=None, nogil=False, with_gil=False):
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
exception_value, exception_check = except_val or (None, False)
nogil = nogil or with_gil
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name=name_declarator.name,
cname=None,
annotation=formal_arg.annotation,
type=py_object_type,
pos=formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type=py_object_type,
args=cfunc_args,
has_varargs=False,
exception_value=None,
exception_check=exception_check,
nogil=nogil,
with_gil=with_gil,
is_overridable=overridable)
cfunc = CVarDefNode(self.pos, type=cfunc_type)
else:
if scope is None:
scope = cfunc.scope
cfunc_type = cfunc.type
if cfunc_type.exception_check:
# this ensures `legacy_implicit_noexcept` does not trigger
# as it would result in a mismatch
# (declaration with except, definition with implicit noexcept)
has_explicit_exc_clause = True
if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
error(self.pos, "wrong number of arguments")
error(cfunc.pos, "previous declaration here")
for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
name_declarator, type = formal_arg.analyse(scope, nonempty=1,
is_self_arg=(i == 0 and scope.is_c_class_scope))
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
if exception_value is None and cfunc_type.exception_value is not None:
from .ExprNodes import ConstNode
exception_value = ConstNode.for_type(
self.pos, value=str(cfunc_type.exception_value), type=cfunc_type.return_type,
constant_result=cfunc_type.exception_value.python_value)
declarator = CFuncDeclaratorNode(self.pos,
base=CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args=self.args,
has_varargs=False,
exception_check=cfunc_type.exception_check,
exception_value=exception_value,
has_explicit_exc_clause = has_explicit_exc_clause,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil)
return CFuncDefNode(self.pos,
modifiers=modifiers or [],
base_type=CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator=declarator,
body=self.body,
doc=self.doc,
overridable=cfunc_type.is_overridable,
type=cfunc_type,
with_gil=cfunc_type.with_gil,
nogil=cfunc_type.nogil,
visibility='private',
api=False,
directive_locals=getattr(cfunc, 'directive_locals', {}),
directive_returns=returns)
def is_cdef_func_compatible(self):
"""Determines if the function's signature is compatible with a
cdef function. This can be used before calling
.as_cfunction() to see if that will be successful.
"""
if self.needs_closure:
return False
if self.star_arg or self.starstar_arg:
return False
return True
def analyse_declarations(self, env):
if self.decorators:
for decorator in self.decorators:
func = decorator.decorator
if func.is_name:
self.is_classmethod |= func.name == 'classmethod'
self.is_staticmethod |= func.name == 'staticmethod'
if self.is_classmethod and env.lookup_here('classmethod'):
# classmethod() was overridden - not much we can do here ...
self.is_classmethod = False
if self.is_staticmethod and env.lookup_here('staticmethod'):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
if env.is_py_class_scope or env.is_c_class_scope:
if self.name == '__new__' and env.is_py_class_scope:
self.is_staticmethod = True
elif self.name == '__init_subclass__' and env.is_c_class_scope:
error(self.pos, "'__init_subclass__' is not supported by extension class")
elif self.name in IMPLICIT_CLASSMETHODS and not self.is_classmethod:
self.is_classmethod = True
# TODO: remove the need to generate a real decorator here, is_classmethod=True should suffice.
from .ExprNodes import NameNode
self.decorators = self.decorators or []
self.decorators.insert(0, DecoratorNode(
self.pos, decorator=NameNode(self.pos, name=EncodedString('classmethod'))))
self.analyse_argument_types(env)
if self.name == '<lambda>':
self.declare_lambda_function(env)
else:
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
# if a signature annotation provides a more specific return object type, use it
if self.return_type is py_object_type and self.return_type_annotation:
if env.directives['annotation_typing'] and not self.entry.is_special:
_, return_type = self.return_type_annotation.analyse_type_annotation(env)
if return_type and return_type.is_pyobject:
self.return_type = return_type
self.create_local_scope(env)
self.py_wrapper = DefNodeWrapper(
self.pos,
target=self,
name=self.entry.name,
args=self.args,
star_arg=self.star_arg,
starstar_arg=self.starstar_arg,
return_type=self.return_type)
self.py_wrapper.analyse_declarations(env)
def analyse_argument_types(self, env):
self.directive_locals = env.directives.get('locals', {})
allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
f2s = env.fused_to_specific
env.fused_to_specific = None
for arg in self.args:
if hasattr(arg, 'name'):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
# If we hare in pythran mode and we got a buffer supported by
# Pythran, we change this node to a fused type
if has_np_pythran(env) and base_type.is_pythran_expr:
base_type = PyrexTypes.FusedType([
base_type,
#PyrexTypes.PythranExpr(pythran_type(self.type, "numpy_texpr")),
base_type.org_buffer])
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
arg.type = type
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos, "Python function argument cannot have C name specification")
arg.type = arg.type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.type.is_pyobject or arg.type.is_buffer or arg.type.is_memoryviewslice:
if arg.or_none:
arg.accept_none = True
elif arg.not_none:
arg.accept_none = False
elif (arg.type.is_extension_type or arg.type.is_builtin_type
or arg.type.is_buffer or arg.type.is_memoryviewslice):
if arg.default and arg.default.constant_result is None:
# special case: def func(MyType obj = None)
arg.accept_none = True
else:
# default depends on compiler directive
arg.accept_none = allow_none_for_extension_args
else:
# probably just a plain 'object'
arg.accept_none = True
elif not arg.type.is_error:
arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
if arg.type.is_fused:
self.has_fused_arguments = True
env.fused_to_specific = f2s
if has_np_pythran(env):
self.np_args_idx = [i for i,a in enumerate(self.args) if a.type.is_numpy_buffer]
else:
self.np_args_idx = []
def analyse_signature(self, env):
if self.entry.is_special:
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
elif not (self.star_arg or self.starstar_arg) and (
not env.directives['always_allow_keywords']
or all([arg.pos_only for arg in self.args])):
# Use the simpler calling signature for zero- and one-argument pos-only functions.
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
elif len(self.args) == 1:
if self.args[0].default is None and not self.args[0].kw_only:
self.entry.signature = TypeSlots.pyfunction_onearg
elif self.entry.signature is TypeSlots.pymethod_signature:
if len(self.args) == 1:
self.entry.signature = TypeSlots.unaryfunc
elif len(self.args) == 2:
if self.args[1].default is None and not self.args[1].kw_only:
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
nfixed = sig.max_num_fixed_args()
min_nfixed = sig.min_num_fixed_args()
if (sig is TypeSlots.pymethod_signature and nfixed == 1
and len(self.args) == 0 and self.star_arg):
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
nfixed = min_nfixed = 0
if self.is_staticmethod and env.is_c_class_scope:
nfixed = min_nfixed = 0
self.self_in_stararg = True # FIXME: why for staticmethods?
self.entry.signature = sig = copy.copy(sig)
sig.fixed_arg_format = "*"
sig.is_staticmethod = True
sig.has_generic_args = True
if ((self.is_classmethod or self.is_staticmethod) and
self.has_fused_arguments and env.is_c_class_scope):
del self.decorator_indirection.stats[:]
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
if i >= min_nfixed:
arg.is_special_method_optional = True
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
arg.hdr_type = arg.type = Builtin.type_type
else:
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
if min_nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and (arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
# Decide whether to use METH_FASTCALL
# 1. If we use METH_NOARGS or METH_O, keep that. We can only change
# METH_VARARGS to METH_FASTCALL
# 2. Special methods like __call__ always use the METH_VARGARGS
# calling convention
mf = sig.method_flags()
if mf and TypeSlots.method_varargs in mf and not self.entry.is_special:
# 3. If the function uses the full args tuple, it's more
# efficient to use METH_VARARGS. This happens when the function
# takes *args but no other positional arguments (apart from
# possibly self). We don't do the analogous check for keyword
# arguments since the kwargs dict is copied anyway.
if self.star_arg:
uses_args_tuple = True
for arg in self.args:
if (arg.is_generic and not arg.kw_only and
not arg.is_self_arg and not arg.is_type_arg):
# Other positional argument
uses_args_tuple = False
else:
uses_args_tuple = False
if not uses_args_tuple:
sig = self.entry.signature = sig.with_fastcall()
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.min_num_fixed_args()
if sig.has_generic_args:
expected_str += " or more"
elif sig.optional_object_arg_count:
expected_str += " to %d" % sig.max_num_fixed_args()
name = self.name
if self.entry.is_special:
desc = "Special method"
else:
desc = "Method"
error(self.pos, "%s %s has wrong number of arguments (%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.lookup_here(name)
if entry:
if entry.is_final_cmethod and not env.parent_type.is_final_type:
error(self.pos, "Only final types can have final Python (def/cpdef) methods")
if entry.type.is_cfunction and not entry.is_builtin_cmethod and not self.is_wrapper:
warning(self.pos, "Overriding a c(p)def method with a def method. "
"This can lead to different methods being called depending on the "
"call context. Consider using a cpdef method for both.", 5)
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
self.entry.pyfunc_cname = punycodify_name(Naming.pyfunc_prefix + prefix + name)
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = punycodify_name(Naming.funcdoc_prefix + prefix + name)
if entry.is_special:
if entry.name in TypeSlots.invisible or not entry.doc or (
entry.name in '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
entry.wrapperbase_cname = punycodify_name(Naming.wrapperbase_prefix + prefix + name)
else:
entry.doc = None
def declare_lambda_function(self, env):
entry = env.declare_lambda_function(self.lambda_name, self.pos)
entry.doc = None
self.entry = entry
self.entry.pyfunc_cname = entry.cname
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.is_arg = 1
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
if env.directives['infer_types'] != False:
type = PyrexTypes.unspecified_type
else:
type = py_object_type
entry = env.declare_var(arg.name, type, arg.pos)
entry.is_arg = 1
entry.used = 1
entry.init = "0"
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
self.analyse_annotations(env)
if not self.needs_assignment_synthesis(env) and self.decorators:
for decorator in self.decorators[::-1]:
decorator.decorator = decorator.decorator.analyse_expressions(env)
self.py_wrapper.prepare_argument_coercion(env)
return self
def needs_assignment_synthesis(self, env, code=None):
if self.is_staticmethod:
return True
if self.specialized_cpdefs or self.entry.is_fused_specialized:
return False
if self.no_assignment_synthesis:
return False
if self.entry.is_special:
return False
if self.entry.is_anonymous:
return True
if env.is_module_scope or env.is_c_class_scope:
if code is None:
return self.local_scope.directives['binding']
else:
return code.globalstate.directives['binding']
return env.is_py_class_scope or env.is_closure_scope
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return self.entry.signature.exception_check
def generate_function_definitions(self, env, code):
if self.defaults_getter:
# defaults getter must never live in class scopes, it's always a module function
module_scope = env.global_scope()
directives_node = CompilerDirectivesNode.for_internal(self.defaults_getter, module_scope)
directives_node.generate_function_definitions(module_scope, code)
# Before closure cnames are mangled
if self.py_wrapper_required:
# func_cname might be modified by @cname
self.py_wrapper.func_cname = self.entry.func_cname
self.py_wrapper.generate_function_definitions(env, code)
FuncDefNode.generate_function_definitions(self, env, code)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
if proto_only:
if self.py_wrapper_required:
self.py_wrapper.generate_function_header(
code, with_pymethdef, True)
return
arg_code_list = []
if self.entry.signature.has_dummy_arg:
self_arg = 'PyObject *%s' % Naming.self_cname
if not self.needs_outer_scope:
self_arg = 'CYTHON_UNUSED ' + self_arg
arg_code_list.append(self_arg)
def arg_decl_code(arg):
entry = arg.entry
if entry.in_closure:
cname = entry.original_cname
else:
cname = entry.cname
decl = entry.type.declaration_code(cname)
if not entry.cf_used:
decl = 'CYTHON_UNUSED ' + decl
return decl
for arg in self.args:
arg_code_list.append(arg_decl_code(arg))
if self.star_arg:
arg_code_list.append(arg_decl_code(self.star_arg))
if self.starstar_arg:
arg_code_list.append(arg_decl_code(self.starstar_arg))
if arg_code_list:
arg_code = ', '.join(arg_code_list)
else:
arg_code = 'void' # No arguments
dc = self.return_type.declaration_code(self.entry.pyfunc_cname)
decls_code = code.globalstate['decls']
preprocessor_guard = self.get_preprocessor_guard()
if preprocessor_guard:
decls_code.putln(preprocessor_guard)
decls_code.putln(
"static %s(%s); /* proto */" % (dc, arg_code))
if preprocessor_guard:
decls_code.putln("#endif")
code.putln("static %s(%s) {" % (dc, arg_code))
def generate_argument_declarations(self, env, code):
pass
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
# Move arguments into closure if required
def put_into_closure(entry):
if entry.in_closure:
if entry.type.is_array:
# This applies to generator expressions that iterate over C arrays (and need to
# capture them by value), under most other circumstances C array arguments are dropped to
# pointers so this copy isn't used
assert entry.type.size is not None
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy({0}, {1}, sizeof({0}));".format(entry.cname, entry.original_cname))
else:
code.putln('%s = %s;' % (entry.cname, entry.original_cname))
if entry.type.is_memoryviewslice:
# TODO - at some point reference count of memoryviews should
# genuinely be unified with PyObjects
entry.type.generate_incref_memoryviewslice(code, entry.cname, True)
elif entry.xdecref_cleanup:
# mostly applies to the starstar arg - this can sometimes be NULL
# so must be xincrefed instead
code.put_var_xincref(entry)
code.put_var_xgiveref(entry)
else:
code.put_var_incref(entry)
code.put_var_giveref(entry)
for arg in self.args:
put_into_closure(arg.entry)
for arg in self.star_arg, self.starstar_arg:
if arg:
put_into_closure(arg.entry)
def generate_argument_type_tests(self, code):
pass
| DefNode |
python | facebook__pyre-check | scripts/pypi/build_pypi_package.py | {
"start": 814,
"end": 11919
} | class ____:
wheel_path: Path
source_distribution_path: Path
def get_source_distribution_and_wheel(artifact_directory: Path) -> BuildArtifacts:
wheel = list(artifact_directory.glob("**/*.whl"))
source_distribution = list(artifact_directory.glob("**/*.tar.gz"))
# make sure the appropriate numbers of files are present in the dist folder
if not len(wheel) == 1 and not len(source_distribution) == 1:
raise ValueError(f"Unexpected files found in {artifact_directory}.")
return BuildArtifacts(
source_distribution_path=source_distribution[0], wheel_path=wheel[0]
)
def _distribution_platform() -> str:
system = platform.system()
if system == "Linux":
# Currently we only ever build on Intel Linux machines.
return "-manylinux1_x86_64"
elif system == "Darwin":
if "arm" in platform.processor():
# This means we are on Apple Silicon machines.
# The minimum possible arm64 Mac version for pip is 11.0.
return "-macosx_11_0_arm64"
return "-macosx_10_11_x86_64"
else:
raise RuntimeError(f"Building on platform `{system}` is not supported.")
def _validate_typeshed(typeshed_path: Path) -> None:
path = typeshed_path.absolute() / "stdlib"
if not path.is_dir():
raise ValueError(
"The provided typeshed directory is not in the expected format: \
It does not contain a 'stdlib' directory."
)
def _validate_version(version: str) -> None:
pattern = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+$")
if not pattern.match(version):
raise ValueError("Invalid version format.")
def _mkdir_and_init(module_path: Path, version: Optional[str] = None) -> None:
module_path.mkdir()
init_path = module_path / "__init__.py"
if version is None:
init_path.touch()
else:
init_path.write_text(
f"""\
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "{version}"
"""
)
def _add_init_files(build_root: Path, version: str) -> None:
# setup.py sdist will refuse to work for directories without a `__init__.py`.
module_path = build_root / MODULE_NAME
_mkdir_and_init(module_path, version)
_mkdir_and_init(module_path / "tools")
_mkdir_and_init(module_path / "tools/upgrade")
_mkdir_and_init(module_path / "tools/upgrade/commands")
_mkdir_and_init(module_path / "client")
def _rsync_files(
filters: List[str],
source_directory: Path,
target_directory: Path,
arguments: List[str],
) -> None:
command = ["rsync"]
command.extend(arguments)
command.extend(["--filter=" + filter_string for filter_string in filters])
command.append(str(source_directory))
command.append(str(target_directory))
subprocess.run(command)
def _sync_python_files(pyre_directory: Path, build_root: Path) -> None:
target_root = build_root / MODULE_NAME
filters = ["- tests/", "+ */", "-! *.py"]
_rsync_files(filters, pyre_directory / "client", target_root, ["-avm"])
_rsync_files(
filters, pyre_directory / "tools" / "upgrade", target_root / "tools", ["-avm"]
)
def _sync_pysa_stubs(pyre_directory: Path, build_root: Path) -> None:
filters = ["+ */"]
_rsync_files(filters, pyre_directory / "stubs" / "taint", build_root, ["-avm"])
_rsync_files(
filters, pyre_directory / "stubs" / "third_party_taint", build_root, ["-avm"]
)
def _sync_stubs(pyre_directory: Path, build_root: Path) -> None:
_rsync_files(
[],
pyre_directory / "stubs",
build_root,
[
"--recursive",
"--copy-links",
"--prune-empty-dirs",
"--verbose",
"--include='django/***'",
"--include='lxml/***'",
"--exclude='*'",
],
)
def _sync_sapp_filters(pyre_directory: Path, build_root: Path) -> None:
_rsync_files(
[],
pyre_directory / "tools" / "sapp" / "pysa_filters",
build_root,
[
"--recursive",
"--prune-empty-dirs",
"--verbose",
],
)
def _sync_typeshed(build_root: Path, typeshed_path: Path) -> None:
typeshed_target = build_root / "typeshed"
_rsync_files(
["+ */", "-! *.pyi"], typeshed_path / "stdlib", typeshed_target, ["-avm"]
)
_rsync_files(
["+ */", "-! *.pyi"], typeshed_path / "stubs", typeshed_target, ["-avm"]
)
_rsync_files(
[],
typeshed_target,
build_root,
[
"--recursive",
"--copy-links",
"--prune-empty-dirs",
"--verbose",
"--chmod='+w'",
"--include='stdlib/***'",
"--exclude='*'",
],
)
def _patch_version(version: str, build_root: Path) -> None:
file_contents = f'__version__ = "{version}"'
(build_root / MODULE_NAME / "client/version.py").write_text(file_contents)
def _ensure_usable_binary_exists(pyre_directory: Path) -> None:
binary_path = pyre_directory / "source" / "_build/default/main.exe"
if not binary_path.is_file():
raise ValueError(
"The binary file does not exist. \
Have you run 'make' in the toplevel directory?"
)
result = subprocess.run(
["file", str(binary_path)],
stdout=subprocess.PIPE,
encoding="utf-8",
)
if "dynamically linked" in result.stdout and EXPECTED_LD_PATH not in result.stdout:
raise ValueError(
"The built executable appears to include an unreleasable ld path. "
f"The output of running `file` on it was {result.stdout}"
)
def _sync_binary(pyre_directory: Path, build_root: Path) -> None:
(build_root / "bin").mkdir()
shutil.copy(
pyre_directory / "source" / "_build/default/main.exe",
build_root / "bin/pyre.bin",
)
def _strip_binary(build_root: Path) -> None:
binary_path = build_root / "bin/pyre.bin"
result = subprocess.run(["strip", str(binary_path)])
if result.returncode != 0:
LOG.warning("Unable to strip debugging info from binary.")
def _sync_documentation_files(pyre_directory: Path, build_root: Path) -> None:
shutil.copy(pyre_directory / "README.md", build_root)
shutil.copy(pyre_directory / "LICENSE", build_root)
def _create_setup_configuration(build_root: Path) -> None:
setup_cfg = build_root / "setup.cfg"
setup_cfg.touch()
setup_cfg.write_text("[metadata]\nlicense_file = LICENSE")
def _create_setup_py(
pyre_directory: Path,
version: str,
build_root: Path,
dependencies: Sequence[str],
nightly: bool,
) -> None:
path = pyre_directory / "scripts/pypi/setup.py"
setup_template = path.read_text()
setup_contents = setup_template.format(
PACKAGE_NAME="pyre-check-nightly" if nightly else "pyre-check",
PACKAGE_VERSION=version,
MODULE_NAME=MODULE_NAME,
RUNTIME_DEPENDENCIES=json.dumps(dependencies),
)
(build_root / "setup.py").write_text(setup_contents)
def _run_setup_command(
pyre_directory: Path,
build_root: Path,
dependencies: Sequence[str],
version: str,
command: str,
nightly: bool,
) -> None:
with open(pyre_directory / "README.md") as f:
long_description = f.read()
old_dir = os.getcwd()
os.chdir(build_root)
run_setup(
package_name="pyre-check-nightly" if nightly else "pyre-check",
package_version=version,
module_name=MODULE_NAME,
runtime_dependencies=dependencies,
long_description=long_description,
script_name="setup.py",
script_args=[command],
)
os.chdir(old_dir)
def _rename_and_move_artifacts(
build_root: Path, output_directory: Path
) -> Tuple[Path, Path]:
dist_directory = build_root / "dist"
build_artifacts = get_source_distribution_and_wheel(dist_directory)
source_distribution = build_artifacts.source_distribution_path
wheel = build_artifacts.wheel_path
source_distribution_destination = output_directory / (
source_distribution.name.split(".tar.gz")[0]
+ _distribution_platform()
+ ".tar.gz"
)
wheel_destination = output_directory / wheel.name.replace(
"-any", _distribution_platform()
)
LOG.info(f"Moving wheel from {str(wheel)} to {str(wheel_destination)}")
wheel.replace(wheel_destination)
LOG.info(
f"Moving source distribution from {str(source_distribution)} to {str(source_distribution_destination)}"
)
source_distribution.replace(source_distribution_destination)
return source_distribution_destination, wheel_destination
def build_pypi_package(
pyre_directory: Path,
typeshed_path: Path,
version: str,
nightly: bool,
output_directory: Optional[Path] = None,
) -> None:
if output_directory is None:
output_directory = pyre_directory / "scripts" / "dist"
LOG.info(f"Output directory is {str(output_directory)}")
_validate_typeshed(typeshed_path)
_validate_version(version)
_ensure_usable_binary_exists(pyre_directory)
dependencies = [
line.strip()
for line in (pyre_directory / "requirements.txt").read_text().split("\n")
if len(line) > 0
]
with tempfile.TemporaryDirectory(prefix="pyre_package_build_") as build_path_str:
build_path = Path(build_path_str)
_add_init_files(build_path, version)
_create_setup_py(pyre_directory, version, build_path, dependencies, nightly)
_sync_python_files(pyre_directory, build_path)
_sync_pysa_stubs(pyre_directory, build_path)
_sync_stubs(pyre_directory, build_path)
_sync_typeshed(build_path, typeshed_path)
_sync_sapp_filters(pyre_directory, build_path)
_sync_binary(pyre_directory, build_path)
_strip_binary(build_path)
_sync_documentation_files(pyre_directory, build_path)
_patch_version(version, build_path)
_run_setup_command(
pyre_directory,
build_path,
dependencies,
version,
"sdist",
nightly,
)
_create_setup_configuration(build_path)
twine_check([path.as_posix() for path in (build_path / "dist").iterdir()])
_run_setup_command(
pyre_directory,
build_path,
dependencies,
version,
"bdist_wheel",
nightly,
)
source_distribution_destination, wheel_destination = _rename_and_move_artifacts(
build_path, output_directory
)
LOG.info("All done.")
LOG.info(
"\n Build artifact available at:\n {}\n".format(
str(source_distribution_destination)
)
)
LOG.info(
"\n Source distribution available at:\n {}\n".format(str(wheel_destination))
)
| BuildArtifacts |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 296107,
"end": 333032
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"admins",
"affiliated_users_with_two_factor_disabled",
"affiliated_users_with_two_factor_disabled_exist",
"allow_private_repository_forking_setting",
"allow_private_repository_forking_setting_organizations",
"allow_private_repository_forking_setting_policy_value",
"default_repository_permission_setting",
"default_repository_permission_setting_organizations",
"domains",
"enterprise_server_installations",
"ip_allow_list_enabled_setting",
"ip_allow_list_entries",
"ip_allow_list_for_installed_apps_enabled_setting",
"is_updating_default_repository_permission",
"is_updating_two_factor_requirement",
"members_can_change_repository_visibility_setting",
"members_can_change_repository_visibility_setting_organizations",
"members_can_create_internal_repositories_setting",
"members_can_create_private_repositories_setting",
"members_can_create_public_repositories_setting",
"members_can_create_repositories_setting",
"members_can_create_repositories_setting_organizations",
"members_can_delete_issues_setting",
"members_can_delete_issues_setting_organizations",
"members_can_delete_repositories_setting",
"members_can_delete_repositories_setting_organizations",
"members_can_invite_collaborators_setting",
"members_can_invite_collaborators_setting_organizations",
"members_can_make_purchases_setting",
"members_can_update_protected_branches_setting",
"members_can_update_protected_branches_setting_organizations",
"members_can_view_dependency_insights_setting",
"members_can_view_dependency_insights_setting_organizations",
"notification_delivery_restriction_enabled_setting",
"oidc_provider",
"organization_projects_setting",
"organization_projects_setting_organizations",
"outside_collaborators",
"pending_admin_invitations",
"pending_collaborator_invitations",
"pending_member_invitations",
"repository_projects_setting",
"repository_projects_setting_organizations",
"saml_identity_provider",
"saml_identity_provider_setting_organizations",
"support_entitlements",
"team_discussions_setting",
"team_discussions_setting_organizations",
"two_factor_required_setting",
"two_factor_required_setting_organizations",
)
admins = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseAdministratorConnection),
graphql_name="admins",
args=sgqlc.types.ArgDict(
(
(
"organization_logins",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="organizationLogins",
default=None,
),
),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"role",
sgqlc.types.Arg(
EnterpriseAdministratorRole, graphql_name="role", default=None
),
),
(
"order_by",
sgqlc.types.Arg(
EnterpriseMemberOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
(
"has_two_factor_enabled",
sgqlc.types.Arg(
Boolean, graphql_name="hasTwoFactorEnabled", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
affiliated_users_with_two_factor_disabled = sgqlc.types.Field(
sgqlc.types.non_null("UserConnection"),
graphql_name="affiliatedUsersWithTwoFactorDisabled",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
affiliated_users_with_two_factor_disabled_exist = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="affiliatedUsersWithTwoFactorDisabledExist",
)
allow_private_repository_forking_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="allowPrivateRepositoryForkingSetting",
)
allow_private_repository_forking_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="allowPrivateRepositoryForkingSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
allow_private_repository_forking_setting_policy_value = sgqlc.types.Field(
EnterpriseAllowPrivateRepositoryForkingPolicyValue,
graphql_name="allowPrivateRepositoryForkingSettingPolicyValue",
)
default_repository_permission_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseDefaultRepositoryPermissionSettingValue),
graphql_name="defaultRepositoryPermissionSetting",
)
default_repository_permission_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="defaultRepositoryPermissionSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(DefaultRepositoryPermissionField),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
domains = sgqlc.types.Field(
sgqlc.types.non_null("VerifiableDomainConnection"),
graphql_name="domains",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"is_verified",
sgqlc.types.Arg(Boolean, graphql_name="isVerified", default=None),
),
(
"is_approved",
sgqlc.types.Arg(Boolean, graphql_name="isApproved", default=None),
),
(
"order_by",
sgqlc.types.Arg(
VerifiableDomainOrder,
graphql_name="orderBy",
default={"field": "DOMAIN", "direction": "ASC"},
),
),
)
),
)
enterprise_server_installations = sgqlc.types.Field(
sgqlc.types.non_null("EnterpriseServerInstallationConnection"),
graphql_name="enterpriseServerInstallations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"connected_only",
sgqlc.types.Arg(
Boolean, graphql_name="connectedOnly", default=False
),
),
(
"order_by",
sgqlc.types.Arg(
EnterpriseServerInstallationOrder,
graphql_name="orderBy",
default={"field": "HOST_NAME", "direction": "ASC"},
),
),
)
),
)
ip_allow_list_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListEnabledSettingValue),
graphql_name="ipAllowListEnabledSetting",
)
ip_allow_list_entries = sgqlc.types.Field(
sgqlc.types.non_null("IpAllowListEntryConnection"),
graphql_name="ipAllowListEntries",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
IpAllowListEntryOrder,
graphql_name="orderBy",
default={"field": "ALLOW_LIST_VALUE", "direction": "ASC"},
),
),
)
),
)
ip_allow_list_for_installed_apps_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListForInstalledAppsEnabledSettingValue),
graphql_name="ipAllowListForInstalledAppsEnabledSetting",
)
is_updating_default_repository_permission = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="isUpdatingDefaultRepositoryPermission",
)
is_updating_two_factor_requirement = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isUpdatingTwoFactorRequirement"
)
members_can_change_repository_visibility_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanChangeRepositoryVisibilitySetting",
)
members_can_change_repository_visibility_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanChangeRepositoryVisibilitySettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_create_internal_repositories_setting = sgqlc.types.Field(
Boolean, graphql_name="membersCanCreateInternalRepositoriesSetting"
)
members_can_create_private_repositories_setting = sgqlc.types.Field(
Boolean, graphql_name="membersCanCreatePrivateRepositoriesSetting"
)
members_can_create_public_repositories_setting = sgqlc.types.Field(
Boolean, graphql_name="membersCanCreatePublicRepositoriesSetting"
)
members_can_create_repositories_setting = sgqlc.types.Field(
EnterpriseMembersCanCreateRepositoriesSettingValue,
graphql_name="membersCanCreateRepositoriesSetting",
)
members_can_create_repositories_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanCreateRepositoriesSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(
OrganizationMembersCanCreateRepositoriesSettingValue
),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_delete_issues_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanDeleteIssuesSetting",
)
members_can_delete_issues_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanDeleteIssuesSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_delete_repositories_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanDeleteRepositoriesSetting",
)
members_can_delete_repositories_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanDeleteRepositoriesSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_invite_collaborators_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanInviteCollaboratorsSetting",
)
members_can_invite_collaborators_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanInviteCollaboratorsSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_make_purchases_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseMembersCanMakePurchasesSettingValue),
graphql_name="membersCanMakePurchasesSetting",
)
members_can_update_protected_branches_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanUpdateProtectedBranchesSetting",
)
members_can_update_protected_branches_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanUpdateProtectedBranchesSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
members_can_view_dependency_insights_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="membersCanViewDependencyInsightsSetting",
)
members_can_view_dependency_insights_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="membersCanViewDependencyInsightsSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
notification_delivery_restriction_enabled_setting = sgqlc.types.Field(
sgqlc.types.non_null(NotificationRestrictionSettingValue),
graphql_name="notificationDeliveryRestrictionEnabledSetting",
)
oidc_provider = sgqlc.types.Field("OIDCProvider", graphql_name="oidcProvider")
organization_projects_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="organizationProjectsSetting",
)
organization_projects_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="organizationProjectsSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
outside_collaborators = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseOutsideCollaboratorConnection),
graphql_name="outsideCollaborators",
args=sgqlc.types.ArgDict(
(
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(
EnterpriseMemberOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
(
"visibility",
sgqlc.types.Arg(
RepositoryVisibility, graphql_name="visibility", default=None
),
),
(
"has_two_factor_enabled",
sgqlc.types.Arg(
Boolean, graphql_name="hasTwoFactorEnabled", default=None
),
),
(
"organization_logins",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="organizationLogins",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pending_admin_invitations = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseAdministratorInvitationConnection),
graphql_name="pendingAdminInvitations",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(
EnterpriseAdministratorInvitationOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "DESC"},
),
),
(
"role",
sgqlc.types.Arg(
EnterpriseAdministratorRole, graphql_name="role", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pending_collaborator_invitations = sgqlc.types.Field(
sgqlc.types.non_null("RepositoryInvitationConnection"),
graphql_name="pendingCollaboratorInvitations",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(
RepositoryInvitationOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "DESC"},
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pending_member_invitations = sgqlc.types.Field(
sgqlc.types.non_null("EnterprisePendingMemberInvitationConnection"),
graphql_name="pendingMemberInvitations",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"organization_logins",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="organizationLogins",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
repository_projects_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="repositoryProjectsSetting",
)
repository_projects_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="repositoryProjectsSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
saml_identity_provider = sgqlc.types.Field(
"EnterpriseIdentityProvider", graphql_name="samlIdentityProvider"
)
saml_identity_provider_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="samlIdentityProviderSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(IdentityProviderConfigurationState),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
support_entitlements = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseMemberConnection),
graphql_name="supportEntitlements",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(
EnterpriseMemberOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
team_discussions_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue),
graphql_name="teamDiscussionsSetting",
)
team_discussions_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="teamDiscussionsSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
two_factor_required_setting = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledSettingValue),
graphql_name="twoFactorRequiredSetting",
)
two_factor_required_setting_organizations = sgqlc.types.Field(
sgqlc.types.non_null("OrganizationConnection"),
graphql_name="twoFactorRequiredSettingOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"value",
sgqlc.types.Arg(
sgqlc.types.non_null(Boolean),
graphql_name="value",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
)
),
)
| EnterpriseOwnerInfo |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 122238,
"end": 122365
} | class ____(BaseModel):
failed: "ShardCleanStatusFailedTelemetry" = Field(..., description="")
| ShardCleanStatusTelemetryOneOf2 |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/model_views.py | {
"start": 5452,
"end": 6948
} | class ____(viewsets.ReadOnlyModelViewSet):
"""
View set that varies serializer class based on request user credentials.
Viewsets using this class should have an attribute `admin_serializer_class`,
which is a serializer that might have more fields that only the builders
require. If the request is using a Build API key, this class will be returned instead.
By default read-only endpoints will be allowed,
to allow write endpoints, inherit from the proper ``rest_framework.mixins.*`` classes.
"""
def get_serializer_class(self):
try:
if self.request.build_api_key and self.admin_serializer_class is not None:
return self.admin_serializer_class
except AttributeError:
pass
return self.serializer_class
def get_queryset_for_api_key(self, api_key):
"""Queryset used when an API key is used in the request."""
raise NotImplementedError
def get_queryset(self):
"""
Filter objects by user or API key.
If an API key is present, we filter by the project associated with the key.
Otherwise, we filter using our API manager method.
With this we check if the user/api key is authorized to acccess the object.
"""
api_key = getattr(self.request, "build_api_key", None)
if api_key:
return self.get_queryset_for_api_key(api_key)
return self.model.objects.api_v2(self.request.user)
| UserSelectViewSet |
python | tensorflow__tensorflow | tensorflow/python/ops/summary_ops_v2.py | {
"start": 2351,
"end": 2689
} | class ____(threading.local):
def __init__(self):
super(_SummaryState, self).__init__()
self.is_recording = None
# TODO(slebedev): why a separate flag for DS and is it on by default?
self.is_recording_distribution_strategy = True
self.writer = None
self.step = None
_summary_state = _SummaryState()
| _SummaryState |
python | astropy__astropy | astropy/visualization/wcsaxes/transforms.py | {
"start": 4895,
"end": 5661
} | class ____(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates.
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions.
"""
@abc.abstractmethod
def transform(self, world):
"""
Transform world to pixel coordinates. You should pass in a NxM array
where N is the number of points to transform, and M is the number of
dimensions. This then returns the (x, y) pixel coordinates
as a Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform.
"""
| World2PixelTransform |
python | pytorch__pytorch | torch/_dynamo/types.py | {
"start": 3438,
"end": 3663
} | class ____(Protocol):
def __call__(
self,
guard_manager: GuardFn,
code: types.CodeType,
f_locals: dict[str, object],
index: int,
last: bool,
) -> None: ...
| DynamoGuardHook |
python | kamyu104__LeetCode-Solutions | Python/longest-cycle-in-a-graph.py | {
"start": 37,
"end": 654
} | class ____(object):
def longestCycle(self, edges):
"""
:type edges: List[int]
:rtype: int
"""
result = -1
lookup = [-1]*len(edges)
idx = 0
for i in xrange(len(edges)):
if lookup[i] != -1:
continue
start = idx
while i != -1:
if lookup[i] != -1:
break
lookup[i] = idx
idx += 1
i = edges[i]
if i != -1 and lookup[i] >= start:
result = max(result, idx-lookup[i])
return result
| Solution |
python | apache__airflow | airflow-core/src/airflow/models/callback.py | {
"start": 9507,
"end": 10460
} | class ____(Callback):
"""Used to store Dag Processor's callback requests in the DB."""
__mapper_args__ = {"polymorphic_identity": CallbackType.DAG_PROCESSOR}
def __init__(self, priority_weight: int, callback: CallbackRequest):
"""Initialize a DagProcessorCallback from a callback request."""
super().__init__(priority_weight=priority_weight)
self.fetch_method = CallbackFetchMethod.DAG_ATTRIBUTE
self.state = None
self.data |= {"req_class": callback.__class__.__name__, "req_data": callback.to_json()}
def get_callback_request(self) -> CallbackRequest:
module = import_module("airflow.callbacks.callback_requests")
callback_request_class = getattr(module, self.data["req_class"])
# Get the function (from the instance) that we need to call
from_json = getattr(callback_request_class, "from_json")
return from_json(self.data["req_data"])
| DagProcessorCallback |
python | pytest-dev__pytest | src/_pytest/skipping.py | {
"start": 5670,
"end": 6734
} | class ____:
"""The result of evaluate_skip_marks()."""
reason: str = "unconditional skip"
def evaluate_skip_marks(item: Item) -> Skip | None:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Skip(reason)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
try:
return Skip(*mark.args, **mark.kwargs)
except TypeError as e:
raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None
return None
@dataclasses.dataclass(frozen=True)
| Skip |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 14470,
"end": 14829
} | class ____(HTTPSuccessful):
"""
subclass of :class:`~HTTPSuccessful`
This indicates that the server has fulfilled the request and
the user agent SHOULD reset the document view which caused the
request to be sent.
code: 205, title: Reset Content
"""
code = 205
title = 'Reset Content'
empty_body = True
| HTTPResetContent |
python | tensorflow__tensorflow | tensorflow/python/framework/python_api_parameter_converter_test.py | {
"start": 1698,
"end": 20188
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def setUp(self):
context.ensure_initialized()
super(PythonAPIWrapperTest, self).setUp()
def makeTensorConverter(self):
"""Returns a new PythonTensorConverter with the current context."""
return PythonTensorConverter(context.context())
def makeApiInfoForGenOp(self, op_name, op_func):
"""Returns a PythonAPIParameterConverter for the given gen_op."""
api_info = _pywrap_python_api_info.PythonAPIInfo(op_name)
api_info.InitializeFromRegisteredOp(op_name)
return api_info
def makeApiInfoFromParamSpecs(self,
api_name,
param_names,
input_specs,
attr_specs,
defaults=()):
"""Returns a PythonAPIParameterConverter built from the given specs."""
api_info = _pywrap_python_api_info.PythonAPIInfo(api_name)
api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
defaults)
return api_info
def assertParamsEqual(self, actual_params, expected_params):
"""Asserts that converted parameters have the expected values & types."""
self.assertLen(actual_params, len(expected_params))
for actual, expected in zip(actual_params, expected_params):
if isinstance(expected, list):
self.assertIsInstance(actual, list)
self.assertLen(actual, len(expected))
for actual_item, expected_item in zip(actual, expected):
self.assertParamEqual(actual_item, expected_item)
else:
self.assertParamEqual(actual, expected)
def assertParamEqual(self, actual, expected):
if isinstance(actual, tensor.Tensor):
self.assertAllEqual(actual, expected)
else:
self.assertEqual(actual, expected)
self.assertIs(type(actual), type(expected))
def assertInferredEqual(self, api_info, inferred, expected):
"""Asserts that inferred attributes have the expected values."""
inferred_type_attrs = api_info.InferredTypeAttrs()
inferred_type_list_attrs = api_info.InferredTypeListAttrs()
inferred_length_attrs = api_info.InferredLengthAttrs()
self.assertLen(inferred.types, len(inferred_type_attrs))
self.assertLen(inferred.type_lists, len(inferred_type_list_attrs))
self.assertLen(inferred.lengths, len(inferred_length_attrs))
actual = {}
for i, val in enumerate(inferred.types):
if val._type_enum == types_pb2.DT_INVALID:
val = types_pb2.DT_INVALID
actual[inferred_type_attrs[i]] = val
for i, val in enumerate(inferred.type_lists):
actual[inferred_type_list_attrs[i]] = val
for i, val in enumerate(inferred.lengths):
actual[inferred_length_attrs[i]] = val
self.assertEqual(actual, expected)
# This test constructs a PythonAPIParameterConverter for an op that expects
# a single argument, whose value is an attribute with a specified type; and
# then uses that converter to convert parameters and checks that the result
# is the expected value.
@parameterized.named_parameters([
("FloatFromFloat", "float", 5.0, 5.0),
("FloatFromInt", "float", 5, 5.0),
("FloatFromNumpyScalar", "float", np.array(5.0), 5.0),
("IntFromInt", "int", 5, 5),
("IntFromFloat", "int", 5.0, 5),
("IntFromNumpyScalar", "int", np.array(5.0), 5),
("StringFromBytes", "string", b"foo", b"foo"),
("StringFromUnicode", "string", u"foo", "foo"),
("BoolFromBool", "bool", True, True),
("TypeFromInt", "type", 1, dtypes.float32),
("TypeFromDType", "type", dtypes.int32, dtypes.int32),
("TypeFromNumpyType", "type", np.int32, dtypes.int32),
("ShapeFromShape", "shape", tensor_shape.as_shape([1, 2]),
tensor_shape.as_shape([1, 2])),
("ShapeFromInt", "shape", 1, tensor_shape.as_shape(1)),
("ShapeFromNone", "shape", None, tensor_shape.as_shape(None)),
("ShapeFromList", "shape", [1, 2, 3], tensor_shape.as_shape([1, 2, 3])),
("ListOfFloat", "list(float)", [1, 2.0, np.array(3)], [1.0, 2.0, 3.0]),
("ListOfInt", "list(int)", [1, 2.0, np.array(3)], [1, 2, 3]),
("ListOfString", "list(string)", [b"foo", u"bar"], [b"foo", u"bar"]),
("ListOfBool", "list(bool)", [True, False, True], [True, False, True]),
("ListOfType", "list(type)", [1, dtypes.int32, np.int64],
[dtypes.float32, dtypes.int32, dtypes.int64]),
("ListOfShape", "list(shape)", [1, None, [2, 3]], [
tensor_shape.as_shape(1),
tensor_shape.as_shape(None),
tensor_shape.as_shape([2, 3])
]),
])
def testConvertAttribute(self, attr_type, attr_val, expected):
api_info = self.makeApiInfoFromParamSpecs("ConvertAttributes", ["x"], {},
{"x": attr_type})
tensor_converter = self.makeTensorConverter()
params = [attr_val]
inferred = Convert(api_info, tensor_converter, params)
self.assertEqual(inferred.types, [])
self.assertEqual(inferred.type_lists, [])
self.assertEqual(inferred.lengths, [])
self.assertLen(params, 1)
actual = params[0]
self.assertEqual(actual, expected)
# Check that we got the actual types we expected. (Note that in Python,
# two values may be equal even if they have different types.)
self.assertIs(type(actual), type(expected))
if isinstance(expected, list):
self.assertLen(actual, len(expected))
for (actual_item, expected_item) in zip(actual, expected):
self.assertIs(type(actual_item), type(expected_item))
def testConvertMultipleAttributes(self):
attr_specs = {"x": "list(int)", "y": "shape", "z": "float"}
api_info = self.makeApiInfoFromParamSpecs("ConvertAttributes",
["x", "y", "z"], {}, attr_specs)
tensor_converter = self.makeTensorConverter()
params = [[1, 2.0, np.array(3.0)], [1, 2], 10]
inferred = Convert(api_info, tensor_converter, params)
self.assertEqual(inferred.types, [])
self.assertEqual(inferred.type_lists, [])
self.assertEqual(inferred.lengths, [])
self.assertLen(params, 3)
self.assertEqual(params, [[1, 2, 3], tensor_shape.as_shape([1, 2]), 10.0])
self.assertIsInstance(params[0][0], int)
self.assertIsInstance(params[1], tensor_shape.TensorShape)
self.assertIsInstance(params[2], float)
@parameterized.named_parameters([
("StringFromInt", "string", 5, "Foo argument x: Failed to convert value "
"of type 'int' to type 'string'."),
("IntFromNone", "int", None, "Foo argument x: Failed to convert value "
"of type 'NoneType' to type 'int'."),
("BoolFromInt", "bool", 0,
"Foo argument x: Failed to convert value of type 'int' to type 'bool'."),
])
def testConvertAttributeError(self, attr_type, attr_val, message):
api_info = self.makeApiInfoFromParamSpecs("Foo", ["x"], {},
{"x": attr_type})
tensor_converter = self.makeTensorConverter()
with self.assertRaisesRegex(TypeError, message):
Convert(api_info, tensor_converter, [attr_val])
@parameterized.named_parameters([
dict(
testcase_name="FixedDTypeInputs",
param_names=["x", "y"],
input_specs=dict(x="int32", y="float32"),
attr_specs={},
inputs=lambda: [1, 2],
outputs=lambda: [Const(1), Const(2.0)],
inferred={}),
dict(
testcase_name="UnconstrainedTypeInput",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="type"),
inputs=lambda: [np.array("foo")],
outputs=lambda: [Const("foo")],
inferred=dict(T=dtypes.string)),
dict(
testcase_name="ConstrainedTypeInput",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="{int32, float, string}"),
inputs=lambda: [np.array("foo")],
outputs=lambda: [Const("foo")],
inferred=dict(T=dtypes.string)),
dict(
testcase_name="SharedTypeInputs",
param_names=["x", "y"],
input_specs=dict(x="T", y="T"),
attr_specs=dict(T="{float, int32, int64}"),
inputs=lambda: [1, np.array(2)],
outputs=lambda: [Const(1), Const(2)],
inferred=dict(T=dtypes.int32)),
dict(
testcase_name="SharedTypeInferredFromTensor",
param_names=["x", "y"],
input_specs=dict(x="T", y="T"),
attr_specs=dict(T="{float, int32, int64}"),
inputs=lambda: [1, Const(2.0)],
outputs=lambda: [Const(1.0), Const(2.0)],
inferred=dict(T=dtypes.float32)),
dict(
# If the native converted type for an input isn't in the ok_dtypes
# list, then we try the default dtype instead.
testcase_name="FallbackToDefaultDtype",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="{float, string} = DT_FLOAT"),
inputs=lambda: [1],
outputs=lambda: [Const(1.0)],
inferred=dict(T=dtypes.float32)),
dict(
testcase_name="RepeatedInput",
param_names=["x", "y"],
input_specs=dict(x="N * T", y="T"),
attr_specs=dict(T="{float, int32}", N="int"),
inputs=lambda: [[1, 2, 3], 4],
outputs=lambda: [[Const(1), Const(2), Const(3)],
Const(4)],
inferred=dict(T=dtypes.int32, N=3)),
dict(
testcase_name="RepeatedInputInferDTypeFromRepeated",
param_names=["x", "y"],
input_specs=dict(x="N * T", y="T"),
attr_specs=dict(T="{float, int32}", N="int"),
inputs=lambda: [[1, 2, Const(3.0)], 4],
outputs=lambda: [[Const(1.0), Const(2.0),
Const(3.0)],
Const(4.0)],
inferred=dict(T=dtypes.float32, N=3)),
dict(
testcase_name="RepeatedInputInferDTypeFromSingleton",
param_names=["x", "y"],
input_specs=dict(x="N * T", y="T"),
attr_specs=dict(T="{float, int32}", N="int"),
inputs=lambda: [[1, 2, 3], Const(4.0)],
outputs=lambda: [[Const(1.0), Const(2.0),
Const(3.0)],
Const(4.0)],
inferred=dict(T=dtypes.float32, N=3)),
dict(
testcase_name="EmptyRepeatedInput",
param_names=["x"],
input_specs=dict(x="N * T"),
attr_specs=dict(T="{float, int32} = DT_INT32", N="int"),
inputs=lambda: [[]],
outputs=lambda: [[]],
inferred=dict(T=dtypes.int32, N=0)),
dict(
testcase_name="EmptyRepeatedInputWithNoDefaultDtype",
param_names=["x"],
input_specs=dict(x="N * T"),
attr_specs=dict(T="{float, int32}", N="int"),
inputs=lambda: [[]],
outputs=lambda: [[]],
inferred=dict(T=types_pb2.DT_INVALID, N=0)),
dict(
testcase_name="RepeatedInputWithExplicitCountAndType",
param_names=["N", "T", "x", "y"],
input_specs=dict(x="N * T", y="T"),
attr_specs=dict(T="{float, int32}", N="int"),
inputs=lambda: [3, np.float32, [1, 2, 3], 4],
outputs=lambda:
[3, dtypes.float32, [Const(1.0), Const(2.0),
Const(3.0)],
Const(4.0)],
inferred={}),
dict(
testcase_name="ListOfTypes",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="list({int32, float32})"),
inputs=lambda: [[1, 2, Const(3.0)]],
outputs=lambda: [[Const(1), Const(2), Const(3.0)]],
inferred=dict(T=[dtypes.int32, dtypes.int32, dtypes.float32])),
dict(
testcase_name="EmptyListOfTypes",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="list({int32, float32}) >= 0"),
inputs=lambda: [[]],
outputs=lambda: [[]],
inferred=dict(T=[])),
dict(
testcase_name="MatchingListsOfTypes",
param_names=["x", "y", "z"],
input_specs=dict(x="T", y="T", z="T"),
attr_specs=dict(T="list({int32, float32})"),
inputs=lambda: [
[1, 2, constant_op.constant(3.0)], # x
[constant_op.constant(4.0), 5, 6], # y
[7, constant_op.constant(8), 9], # z
],
outputs=lambda: nest.map_structure(
constant_op.constant, #
[[1.0, 2, 3.0], [4.0, 5, 6.0], [7.0, 8, 9.0]]),
inferred=dict(T=[dtypes.float32, dtypes.int32, dtypes.float32])),
dict(
testcase_name="ExplicitListOfTypes",
param_names=["x", "T"],
input_specs=dict(x="T"),
attr_specs=dict(T="list({int32, float32})"),
inputs=lambda: [[1, 2, constant_op.constant(3.0)],
[dtypes.int32, dtypes.float32, dtypes.float32]],
outputs=lambda: [[
constant_op.constant(1, dtypes.int32),
constant_op.constant(2, dtypes.float32),
constant_op.constant(3.0, dtypes.float32)
], [dtypes.int32, dtypes.float32, dtypes.float32]],
inferred={}),
dict(
testcase_name="NameParam",
param_names=["x", "y", "name"],
input_specs=dict(x="int32", y="float32"),
attr_specs={},
inputs=lambda: [1, 2, "bob"],
outputs=lambda: [
constant_op.constant(1, dtypes.int32),
constant_op.constant(2, dtypes.float32), "bob"
],
inferred={}),
dict(
testcase_name="NameParamInNonstandardPosition",
param_names=["x", "name", "y"],
input_specs=dict(x="int32", y="float32"),
attr_specs={},
inputs=lambda: [1, "bob", 2],
outputs=lambda: [
constant_op.constant(1, dtypes.int32), "bob",
constant_op.constant(2, dtypes.float32)
],
inferred={}),
dict(
testcase_name="NameParamIsNotConvertedOrModified",
param_names=["x", "y", "name"],
input_specs=dict(x="int32", y="float32"),
attr_specs={},
inputs=lambda: [1, 2, {
"foo": ["bar", "baz"]
}],
outputs=lambda: [
constant_op.constant(1, dtypes.int32),
constant_op.constant(2, dtypes.float32), {
"foo": ["bar", "baz"]
}
],
inferred={}),
dict(
# Note: there don't appear to be any real-world ops that have a
# type(list) attr whose default value is anything other than `[]`.
# But we test this case anyway.
testcase_name="ListOfTypesFallbackToDefault",
param_names=["x"],
input_specs=dict(x="T"),
attr_specs=dict(T="list({string, float32}) = [DT_FLOAT, DT_FLOAT]"),
inputs=lambda: [[1, 2.0]],
outputs=lambda: [[
constant_op.constant(1.0, dtypes.float32),
constant_op.constant(2.0, dtypes.float32)
]],
inferred=dict(T=[dtypes.float32, dtypes.float32])),
dict(
testcase_name="ComplexOp",
param_names=["a", "b", "c", "d", "e", "f", "name"],
input_specs=dict(a="X", b="N * X", e="Y", f="Y"),
attr_specs=dict(
c="list(int)",
d="string",
N="int",
X="type",
Y="list({int32, string})"),
inputs=lambda: [
[[1, 2, 3], [4, 5, 6]], # a
[[1, 2], [3, 4, 5], [6]], # b
[1, 2, 3], # c
"Foo", # d
[[1, 2], [["three"]], [4], "five"], # e
[1, "two", [[3, 4], [5, 6]], [["7"]]], # f
],
outputs=lambda: [
Const([[1, 2, 3], [4, 5, 6]]),
[Const([1, 2]), Const([3, 4, 5]),
Const([6])],
[1, 2, 3],
"Foo",
[Const([1, 2]),
Const([["three"]]),
Const([4]),
Const("five")],
[Const(1),
Const("two"),
Const([[3, 4], [5, 6]]),
Const([["7"]])],
],
inferred=dict(
N=3,
X=dtypes.int32,
Y=[dtypes.int32, dtypes.string, dtypes.int32, dtypes.string])),
])
def testConvert(self, param_names, input_specs, attr_specs, inputs, outputs,
inferred):
api_info = self.makeApiInfoFromParamSpecs("TestFunc", param_names,
input_specs, attr_specs)
tensor_converter = self.makeTensorConverter()
param_values = inputs()
actual_inferred = Convert(api_info, tensor_converter, param_values)
self.assertInferredEqual(api_info, actual_inferred, inferred)
self.assertParamsEqual(param_values, outputs())
@parameterized.named_parameters([
dict(
testcase_name="WrongDTypeForFixedDTypeInput",
param_names=["x"],
input_specs=dict(x="float"),
attr_specs={},
inputs=lambda: [constant_op.constant(1)],
message="TestFunc argument x: Expected DT_FLOAT but got DT_INT32"),
dict(
testcase_name="AddIntTensorAndFloatTensor",
param_names=["x", "y"],
input_specs=dict(x="T", y="T"),
attr_specs=dict(T="{float, int32, int64}"),
inputs=lambda: [constant_op.constant(1),
constant_op.constant(2.0)],
message="TestFunc argument y: Expected DT_INT32 but got DT_FLOAT"),
])
def testConvertError(self,
param_names,
input_specs,
attr_specs,
inputs,
message,
exception=TypeError):
api_info = self.makeApiInfoFromParamSpecs("TestFunc", param_names,
input_specs, attr_specs)
tensor_converter = self.makeTensorConverter()
param_values = inputs()
with self.assertRaisesRegex(exception, message):
Convert(api_info, tensor_converter, param_values)
if __name__ == "__main__":
googletest.main()
| PythonAPIWrapperTest |
python | astropy__astropy | astropy/units/format/generic.py | {
"start": 12689,
"end": 16953
} | class ____(Base, _GenericParserMixin):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
@classproperty
def _units(cls) -> dict[str, UnitBase]:
return get_current_unit_registry().registry
@classmethod
def _validate_unit(
cls, s: str, deprecations: DeprecatedUnitAction = DeprecatedUnitAction.WARN
) -> UnitBase:
if s in cls._unit_symbols:
s = cls._unit_symbols[s]
elif not s.isascii():
if s[0].startswith("°"):
s = "deg" if len(s) == 1 else "deg_" + s[1:]
if len(s) > 1 and s[-1] in cls._unit_suffix_symbols:
s = s[:-1] + cls._unit_suffix_symbols[s[-1]]
elif s.endswith("R\N{INFINITY}"):
s = s[:-2] + "Ry"
return cls._units[s]
@classmethod
def _invalid_unit_error_message(cls, unit: str) -> str:
return f"{unit} is not a valid unit. {cls._did_you_mean_units(unit)}"
_unit_symbols: ClassVar[dict[str, str]] = {
"%": "percent",
"\N{PRIME}": "arcmin",
"\N{DOUBLE PRIME}": "arcsec",
"\N{MODIFIER LETTER SMALL H}": "hourangle",
"e\N{SUPERSCRIPT MINUS}": "electron",
}
_unit_suffix_symbols: ClassVar[dict[str, str]] = {
"\N{CIRCLED DOT OPERATOR}": "sun",
"\N{SUN}": "sun",
"\N{CIRCLED PLUS}": "earth",
"\N{EARTH}": "earth",
"\N{JUPITER}": "jupiter",
"\N{LATIN SUBSCRIPT SMALL LETTER E}": "_e",
"\N{LATIN SUBSCRIPT SMALL LETTER P}": "_p",
}
_translations: ClassVar[dict[int, str]] = str.maketrans({"\N{MINUS SIGN}": "-"})
"""Character translations that should be applied before parsing a string."""
_superscripts: Final[str] = (
"\N{SUPERSCRIPT MINUS}"
"\N{SUPERSCRIPT PLUS SIGN}"
"\N{SUPERSCRIPT ZERO}"
"\N{SUPERSCRIPT ONE}"
"\N{SUPERSCRIPT TWO}"
"\N{SUPERSCRIPT THREE}"
"\N{SUPERSCRIPT FOUR}"
"\N{SUPERSCRIPT FIVE}"
"\N{SUPERSCRIPT SIX}"
"\N{SUPERSCRIPT SEVEN}"
"\N{SUPERSCRIPT EIGHT}"
"\N{SUPERSCRIPT NINE}"
)
_superscript_translations: ClassVar[dict[int, int]] = str.maketrans(
_superscripts, "-+0123456789"
)
_regex_superscript: ClassVar[Pattern[str]] = re.compile(
f"[{_superscripts}]?[{_superscripts[2:]}]+"
)
@classmethod
def _convert_superscript(cls, m: Match[str]) -> str:
return f"({m.group().translate(cls._superscript_translations)})"
@classmethod
def parse(cls, s: str, debug: bool = False) -> UnitBase:
if not isinstance(s, str):
s = s.decode("ascii")
elif not s.isascii():
# common normalization of unicode strings to avoid
# having to deal with multiple representations of
# the same character. This normalizes to "composed" form
# and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
s = unicodedata.normalize("NFC", s)
# Translate some basic unicode items that we'd like to support on
# input but are not standard.
s = s.translate(cls._translations)
# TODO: might the below be better done in the parser/lexer?
# Translate superscripts to parenthesized numbers; this ensures
# that mixes of superscripts and regular numbers fail.
s = cls._regex_superscript.sub(cls._convert_superscript, s)
result = cls._do_parse(s, debug)
# Check for excess solidi, but exclude fractional exponents (accepted)
n_slashes = s.count("/")
if n_slashes > 1 and (n_slashes - len(re.findall(r"\(\d+/\d+\)", s))) > 1:
warnings.warn(
f"'{s}' contains multiple slashes, which is "
"discouraged by the FITS standard",
UnitsWarning,
)
return result
@classmethod
def format_exponential_notation(
cls, val: UnitScale | np.number, format_spec: str = "g"
) -> str:
return format(val, format_spec)
| Generic |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 4715,
"end": 5739
} | class ____:
def __deepcopy__(self, memo):
# Any references to objects further up the tree should not be deep-copied.
# However, if they're in memo (because they've already been deep-copied because
# we're copying from far enough up the tree) then they should be replaced
# with the memorised value.
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if k in self.uptree_ref_attrs:
# Note that memo being keyed by "id" is a bit of an implementation detail;
# the documentation says to treat it as opaque.
v = memo.get(id(v), v)
else:
v = copy.deepcopy(v, memo)
setattr(result, k, v)
return result
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
#return add_metaclass(CheckAnalysers)(cls)
return cls
@_with_metaclass
| CopyWithUpTreeRefsMixin |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 3082,
"end": 3326
} | class ____(BaseModel, extra="forbid"):
ids: List["ExtendedPointId"] = Field(..., description="")
vectors: "BatchVectorStruct" = Field(..., description="")
payloads: Optional[List["Payload"]] = Field(default=None, description="")
| Batch |
python | pypa__warehouse | tests/unit/manage/test_forms.py | {
"start": 16323,
"end": 20541
} | class ____:
def test_validate(self):
user_service = pretend.stub(
verify_webauthn_credential=lambda *a, **kw: pretend.stub(),
get_webauthn_by_label=lambda *a: None,
)
user_id = pretend.stub()
challenge = pretend.stub()
rp_id = pretend.stub()
origin = pretend.stub()
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"label": "label", "credential": "{}"}),
user_service=user_service,
user_id=user_id,
challenge=challenge,
rp_id=rp_id,
origin=origin,
)
assert form.user_service is user_service
assert form.user_id is user_id
assert form.challenge is challenge
assert form.rp_id is rp_id
assert form.origin is origin
assert form.validate(), str(form.errors)
def test_verify_assertion_invalid_json(self):
user_service = pretend.stub(
get_webauthn_by_label=pretend.call_recorder(lambda *a: None)
)
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"credential": "invalid json", "label": "fake label"}),
user_service=user_service,
user_id=pretend.stub(),
challenge=pretend.stub(),
rp_id=pretend.stub(),
origin=pretend.stub(),
)
assert not form.validate()
assert (
form.credential.errors.pop() == "Invalid WebAuthn credential: Bad payload"
)
def test_verify_assertion_invalid(self):
user_service = pretend.stub(
verify_webauthn_credential=pretend.raiser(
webauthn.RegistrationRejectedError("Fake exception")
),
get_webauthn_by_label=pretend.call_recorder(lambda *a: None),
)
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"credential": "{}", "label": "fake label"}),
user_service=user_service,
user_id=pretend.stub(),
challenge=pretend.stub(),
rp_id=pretend.stub(),
origin=pretend.stub(),
)
assert not form.validate()
assert form.credential.errors.pop() == "Fake exception"
def test_verify_label_missing(self):
user_service = pretend.stub(
verify_webauthn_credential=lambda *a, **kw: pretend.stub()
)
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"credential": "{}"}),
user_service=user_service,
user_id=pretend.stub(),
challenge=pretend.stub(),
rp_id=pretend.stub(),
origin=pretend.stub(),
)
assert not form.validate()
assert form.label.errors.pop() == "Specify a label"
def test_verify_label_already_in_use(self):
user_service = pretend.stub(
verify_webauthn_credential=lambda *a, **kw: pretend.stub(),
get_webauthn_by_label=pretend.call_recorder(lambda *a: pretend.stub()),
)
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"credential": "{}", "label": "fake label"}),
user_service=user_service,
user_id=pretend.stub(),
challenge=pretend.stub(),
rp_id=pretend.stub(),
origin=pretend.stub(),
)
assert not form.validate()
assert form.label.errors.pop() == "Label 'fake label' already in use"
def test_creates_validated_credential(self):
fake_validated_credential = object()
user_service = pretend.stub(
verify_webauthn_credential=lambda *a, **kw: fake_validated_credential,
get_webauthn_by_label=pretend.call_recorder(lambda *a: None),
)
form = forms.ProvisionWebAuthnForm(
formdata=MultiDict({"credential": "{}", "label": "fake label"}),
user_service=user_service,
user_id=pretend.stub(),
challenge=pretend.stub(),
rp_id=pretend.stub(),
origin=pretend.stub(),
)
assert form.validate(), str(form.errors)
assert form.validated_credential is fake_validated_credential
| TestProvisionWebAuthnForm |
python | joke2k__faker | faker/providers/date_time/sl_SI/__init__.py | {
"start": 46,
"end": 787
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "Nedelja",
"1": "Ponedeljek",
"2": "Torek",
"3": "Sreda",
"4": "Četrtek",
"5": "Petek",
"6": "Sobota",
}
MONTH_NAMES = {
"01": "Januar",
"02": "Februar",
"03": "Marec",
"04": "April",
"05": "Maj",
"06": "Junij",
"07": "Julij",
"08": "Avgust",
"09": "September",
"10": "Oktober",
"11": "November",
"12": "December",
}
def day_of_week(self) -> str:
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | apache__airflow | airflow-core/src/airflow/hooks/base.py | {
"start": 891,
"end": 4189
} | class ____(Protocol):
"""
Interface that providers *can* implement to be discovered by ProvidersManager.
It is not used by any of the Hooks, but simply methods and class fields described here are
implemented by those Hooks. Each method is optional -- only implement the ones you need.
The conn_name_attr, default_conn_name, conn_type should be implemented by those
Hooks that want to be automatically mapped from the connection_type -> Hook when get_hook method
is called with connection_type.
Additionally hook_name should be set when you want the hook to have a custom name in the UI selection
Name. If not specified, conn_name will be used.
The "get_ui_field_behaviour" and "get_connection_form_widgets" are optional - override them if you want
to customize the Connection Form screen. You can add extra widgets to parse your extra fields via the
get_connection_form_widgets method as well as hide or relabel the fields or pre-fill
them with placeholders via get_ui_field_behaviour method.
Note that the "get_ui_field_behaviour" and "get_connection_form_widgets" need to be set by each class
in the class hierarchy in order to apply widget customizations.
For example, even if you want to use the fields from your parent class, you must explicitly
have a method on *your* class:
.. code-block:: python
@classmethod
def get_ui_field_behaviour(cls):
return super().get_ui_field_behaviour()
You also need to add the Hook class name to list 'hook_class_names' in provider.yaml in case you
build an internal provider or to return it in dictionary returned by provider_info entrypoint in the
package you prepare.
You can see some examples in airflow/providers/jdbc/hooks/jdbc.py.
"""
conn_name_attr: str
default_conn_name: str
conn_type: str
hook_name: str
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""
Return dictionary of widgets to be added for the hook to handle extra values.
If you have class hierarchy, usually the widgets needed by your class are already
added by the base class, so there is no need to implement this method. It might
actually result in warning in the logs if you try to add widgets that have already
been added by the base class.
Note that values of Dict should be of wtforms.Field type. It's not added here
for the efficiency of imports.
"""
...
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""
Attributes of the UI field.
Returns dictionary describing customizations to implement in javascript handling the
connection form. Should be compliant with airflow/customized_form_field_behaviours.schema.json'
If you change conn_type in a derived class, you should also
implement this method and return field customizations appropriate to your Hook. This
is because the child hook will have usually different conn_type and the customizations
are per connection type.
.. seealso::
:class:`~airflow.providers.google.cloud.hooks.compute_ssh.ComputeSSH` as an example
"""
...
| DiscoverableHook |
python | pypa__warehouse | warehouse/oidc/models/google.py | {
"start": 4296,
"end": 5462
} | class ____(GooglePublisherMixin, PendingOIDCPublisher):
__tablename__ = "pending_google_oidc_publishers"
__mapper_args__ = {"polymorphic_identity": "pending_google_oidc_publishers"}
__table_args__ = ( # type: ignore[assignment]
UniqueConstraint(
"email",
"sub",
name="_pending_google_oidc_publisher_uc",
),
)
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True), ForeignKey(PendingOIDCPublisher.id), primary_key=True
)
def reify(self, session: Session) -> GooglePublisher:
"""
Returns a `GooglePublisher` for this `PendingGooglePublisher`,
deleting the `PendingGooglePublisher` in the process.
"""
maybe_publisher = (
session.query(GooglePublisher)
.filter(
GooglePublisher.email == self.email,
GooglePublisher.sub == self.sub,
)
.one_or_none()
)
publisher = maybe_publisher or GooglePublisher(
email=self.email,
sub=self.sub,
)
session.delete(self)
return publisher
| PendingGooglePublisher |
python | getsentry__sentry | tests/sentry/relocation/api/serializers/test_relocation.py | {
"start": 559,
"end": 10481
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user(
email="owner", is_superuser=False, is_staff=True, is_active=True
)
self.superuser = self.create_user(
"superuser", is_superuser=True, is_staff=True, is_active=True
)
self.login_as(user=self.superuser, superuser=True)
self.first_imported_user = self.create_user(email="first@example.com")
self.second_imported_user = self.create_user(email="second@example.com")
self.imported_org = self.create_organization(owner=self.first_imported_user)
self.create_member(
user=self.second_imported_user, organization=self.imported_org, role="member", teams=[]
)
def mock_imported_users_and_org(self, relocation: Relocation) -> None:
ControlImportChunkReplica.objects.create(
import_uuid=relocation.uuid,
model="sentry.user",
min_ordinal=1,
max_ordinal=2,
min_source_pk=1,
max_source_pk=2,
min_inserted_pk=1,
max_inserted_pk=2,
inserted_map={1: self.first_imported_user.id, 2: self.second_imported_user.id},
)
RegionImportChunk.objects.create(
import_uuid=relocation.uuid,
model="sentry.organization",
min_ordinal=1,
max_ordinal=2,
min_source_pk=1,
max_source_pk=2,
min_inserted_pk=1,
max_inserted_pk=2,
inserted_map={1: self.imported_org.id},
)
def test_in_progress(self) -> None:
relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.IN_PROGRESS.value,
step=Relocation.Step.UPLOADING.value,
scheduled_pause_at_step=Relocation.Step.POSTPROCESSING.value,
want_org_slugs=["foo"],
want_usernames=["alice", "bob"],
latest_notified=None,
latest_task=OrderedTask.UPLOADING_COMPLETE.name,
latest_task_attempts=1,
)
result = serialize(relocation)
assert result["dateAdded"] == TEST_DATE_ADDED
assert result["dateUpdated"] == TEST_DATE_UPDATED
assert result["uuid"] == str(relocation.uuid)
assert result["creator"]["id"] == str(self.superuser.id)
assert result["creator"]["email"] == str(self.superuser.email)
assert result["creator"]["username"] == str(self.superuser.username)
assert result["owner"]["id"] == str(self.owner.id)
assert result["owner"]["email"] == str(self.owner.email)
assert result["owner"]["username"] == str(self.owner.username)
assert result["status"] == Relocation.Status.IN_PROGRESS.name
assert result["step"] == Relocation.Step.UPLOADING.name
assert not result["failureReason"]
assert result["scheduledPauseAtStep"] == Relocation.Step.POSTPROCESSING.name
assert not result["scheduledCancelAtStep"]
assert result["wantOrgSlugs"] == ["foo"]
assert result["wantUsernames"] == ["alice", "bob"]
assert not result["latestNotified"]
assert not result["latestUnclaimedEmailsSentAt"]
assert result["latestTask"] == OrderedTask.UPLOADING_COMPLETE.name
assert result["latestTaskAttempts"] == 1
assert result["importedUserIds"] == []
assert result["importedOrgIds"] == []
def test_pause(self) -> None:
relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.PAUSE.value,
step=Relocation.Step.IMPORTING.value,
want_org_slugs=["bar"],
want_usernames=["charlie", "denise"],
latest_notified=Relocation.EmailKind.STARTED.value,
latest_task=OrderedTask.IMPORTING.name,
latest_task_attempts=1,
)
self.mock_imported_users_and_org(relocation)
result = serialize(relocation)
assert result["dateAdded"] == TEST_DATE_ADDED
assert result["dateUpdated"] == TEST_DATE_UPDATED
assert result["uuid"] == str(relocation.uuid)
assert result["creator"]["id"] == str(self.superuser.id)
assert result["creator"]["email"] == str(self.superuser.email)
assert result["creator"]["username"] == str(self.superuser.username)
assert result["owner"]["id"] == str(self.owner.id)
assert result["owner"]["email"] == str(self.owner.email)
assert result["owner"]["username"] == str(self.owner.username)
assert result["status"] == Relocation.Status.PAUSE.name
assert result["step"] == Relocation.Step.IMPORTING.name
assert not result["failureReason"]
assert not result["scheduledPauseAtStep"]
assert not result["scheduledCancelAtStep"]
assert result["wantOrgSlugs"] == ["bar"]
assert result["wantUsernames"] == ["charlie", "denise"]
assert result["latestNotified"] == Relocation.EmailKind.STARTED.name
assert not result["latestUnclaimedEmailsSentAt"]
assert result["latestTask"] == OrderedTask.IMPORTING.name
assert result["latestTaskAttempts"] == 1
assert sorted(result["importedUserIds"]) == [
self.first_imported_user.id,
self.second_imported_user.id,
]
assert result["importedOrgIds"] == [self.imported_org.id]
def test_success(self) -> None:
relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.SUCCESS.value,
step=Relocation.Step.COMPLETED.value,
want_org_slugs=["foo"],
want_usernames=["emily", "fred"],
latest_notified=Relocation.EmailKind.SUCCEEDED.value,
latest_unclaimed_emails_sent_at=TEST_DATE_UPDATED,
latest_task=OrderedTask.COMPLETED.name,
latest_task_attempts=1,
)
self.mock_imported_users_and_org(relocation)
result = serialize(relocation)
assert result["dateAdded"] == TEST_DATE_ADDED
assert result["dateUpdated"] == TEST_DATE_UPDATED
assert result["uuid"] == str(relocation.uuid)
assert result["creator"]["id"] == str(self.superuser.id)
assert result["creator"]["email"] == str(self.superuser.email)
assert result["creator"]["username"] == str(self.superuser.username)
assert result["owner"]["id"] == str(self.owner.id)
assert result["owner"]["email"] == str(self.owner.email)
assert result["owner"]["username"] == str(self.owner.username)
assert result["status"] == Relocation.Status.SUCCESS.name
assert result["step"] == Relocation.Step.COMPLETED.name
assert not result["failureReason"]
assert not result["scheduledPauseAtStep"]
assert not result["scheduledCancelAtStep"]
assert result["wantOrgSlugs"] == ["foo"]
assert result["wantUsernames"] == ["emily", "fred"]
assert result["latestNotified"] == Relocation.EmailKind.SUCCEEDED.name
assert result["latestUnclaimedEmailsSentAt"] == TEST_DATE_UPDATED
assert result["latestTask"] == OrderedTask.COMPLETED.name
assert result["latestTaskAttempts"] == 1
assert sorted(result["importedUserIds"]) == [
self.first_imported_user.id,
self.second_imported_user.id,
]
assert result["importedOrgIds"] == [self.imported_org.id]
def test_failure(self) -> None:
relocation: Relocation = Relocation.objects.create(
date_added=TEST_DATE_ADDED,
creator_id=self.superuser.id,
owner_id=self.owner.id,
status=Relocation.Status.FAILURE.value,
step=Relocation.Step.VALIDATING.value,
scheduled_cancel_at_step=Relocation.Step.IMPORTING.value,
failure_reason="Some failure reason",
want_org_slugs=["qux"],
want_usernames=["alice", "bob"],
latest_notified=Relocation.EmailKind.FAILED.value,
latest_task=OrderedTask.VALIDATING_COMPLETE.name,
latest_task_attempts=1,
)
result = serialize(relocation)
assert result["dateAdded"] == TEST_DATE_ADDED
assert result["dateUpdated"] == TEST_DATE_UPDATED
assert result["uuid"] == str(relocation.uuid)
assert result["creator"]["id"] == str(self.superuser.id)
assert result["creator"]["email"] == str(self.superuser.email)
assert result["creator"]["username"] == str(self.superuser.username)
assert result["owner"]["id"] == str(self.owner.id)
assert result["owner"]["email"] == str(self.owner.email)
assert result["owner"]["username"] == str(self.owner.username)
assert result["status"] == Relocation.Status.FAILURE.name
assert result["step"] == Relocation.Step.VALIDATING.name
assert result["failureReason"] == "Some failure reason"
assert not result["scheduledPauseAtStep"]
assert result["scheduledCancelAtStep"] == Relocation.Step.IMPORTING.name
assert result["wantOrgSlugs"] == ["qux"]
assert result["wantUsernames"] == ["alice", "bob"]
assert result["latestNotified"] == Relocation.EmailKind.FAILED.name
assert not result["latestUnclaimedEmailsSentAt"]
assert result["latestTask"] == OrderedTask.VALIDATING_COMPLETE.name
assert result["latestTaskAttempts"] == 1
assert result["importedUserIds"] == []
assert result["importedOrgIds"] == []
| RelocationSerializerTest |
python | joke2k__faker | tests/providers/test_color.py | {
"start": 18169,
"end": 18499
} | class ____:
"""Test cs_CZ color provider methods"""
def test_safe_color_name(self, faker, num_samples):
for _ in range(num_samples):
safe_color_name = faker.safe_color_name()
assert isinstance(safe_color_name, str)
assert safe_color_name in CsCzColorProvider.safe_colors
| TestCsCz |
python | spack__spack | lib/spack/spack/util/environment.py | {
"start": 10795,
"end": 11206
} | class ____(NameValueModifier):
def execute(self, env: MutableMapping[str, str]):
tty.debug(f"RemoveFlagsEnv: {self.name}-{self.value}", level=3)
environment_value = env.get(self.name, "")
flags = environment_value.split(self.separator) if environment_value else []
flags = [f for f in flags if f != self.value]
env[self.name] = self.separator.join(flags)
| RemoveFlagsEnv |
python | coleifer__peewee | playhouse/sqlite_ext.py | {
"start": 34204,
"end": 48525
} | class ____(SqliteDatabase):
def __init__(self, database, c_extensions=None, rank_functions=True,
hash_functions=False, regexp_function=False,
bloomfilter=False, json_contains=False, *args, **kwargs):
super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
self._row_factory = None
if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
raise ImproperlyConfigured('SqliteExtDatabase initialized with '
'C extensions, but shared library was '
'not found!')
prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
if rank_functions:
if prefer_c:
register_rank_functions(self)
else:
self.register_function(bm25, 'fts_bm25')
self.register_function(rank, 'fts_rank')
self.register_function(bm25, 'fts_bm25f') # Fall back to bm25.
self.register_function(bm25, 'fts_lucene')
if hash_functions:
if not prefer_c:
raise ValueError('C extension required to register hash '
'functions.')
register_hash_functions(self)
if regexp_function:
self.register_function(_sqlite_regexp, 'regexp', 2)
if bloomfilter:
if not prefer_c:
raise ValueError('C extension required to use bloomfilter.')
register_bloomfilter(self)
if json_contains:
self.register_function(_json_contains, 'json_contains')
self._c_extensions = prefer_c
def _add_conn_hooks(self, conn):
super(SqliteExtDatabase, self)._add_conn_hooks(conn)
if self._row_factory:
conn.row_factory = self._row_factory
def row_factory(self, fn):
self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
SQLITE_STATUS_MEMORY_USED = 0
SQLITE_STATUS_PAGECACHE_USED = 1
SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
SQLITE_STATUS_SCRATCH_USED = 3
SQLITE_STATUS_SCRATCH_OVERFLOW = 4
SQLITE_STATUS_MALLOC_SIZE = 5
SQLITE_STATUS_PARSER_STACK = 6
SQLITE_STATUS_PAGECACHE_SIZE = 7
SQLITE_STATUS_SCRATCH_SIZE = 8
SQLITE_STATUS_MALLOC_COUNT = 9
SQLITE_DBSTATUS_LOOKASIDE_USED = 0
SQLITE_DBSTATUS_CACHE_USED = 1
SQLITE_DBSTATUS_SCHEMA_USED = 2
SQLITE_DBSTATUS_STMT_USED = 3
SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
SQLITE_DBSTATUS_CACHE_HIT = 7
SQLITE_DBSTATUS_CACHE_MISS = 8
SQLITE_DBSTATUS_CACHE_WRITE = 9
SQLITE_DBSTATUS_DEFERRED_FKS = 10
#SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
def __status__(flag, return_highwater=False):
"""
Expose a sqlite3_status() call for a particular flag as a property of
the Database object.
"""
def getter(self):
result = sqlite_get_status(flag)
return result[1] if return_highwater else result
return property(getter)
def __dbstatus__(flag, return_highwater=False, return_current=False):
"""
Expose a sqlite3_dbstatus() call for a particular flag as a property of
the Database instance. Unlike sqlite3_status(), the dbstatus properties
pertain to the current connection.
"""
def getter(self):
if self._state.conn is None:
raise ImproperlyConfigured('database connection not opened.')
result = sqlite_get_db_status(self._state.conn, flag)
if return_current:
return result[0]
return result[1] if return_highwater else result
return property(getter)
class CSqliteExtDatabase(SqliteExtDatabase):
def __init__(self, *args, **kwargs):
self._conn_helper = None
self._commit_hook = self._rollback_hook = self._update_hook = None
self._replace_busy_handler = False
super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
def init(self, database, replace_busy_handler=False, **kwargs):
super(CSqliteExtDatabase, self).init(database, **kwargs)
self._replace_busy_handler = replace_busy_handler
def _close(self, conn):
if self._commit_hook:
self._conn_helper.set_commit_hook(None)
if self._rollback_hook:
self._conn_helper.set_rollback_hook(None)
if self._update_hook:
self._conn_helper.set_update_hook(None)
return super(CSqliteExtDatabase, self)._close(conn)
def _add_conn_hooks(self, conn):
super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
self._conn_helper = ConnectionHelper(conn)
if self._commit_hook is not None:
self._conn_helper.set_commit_hook(self._commit_hook)
if self._rollback_hook is not None:
self._conn_helper.set_rollback_hook(self._rollback_hook)
if self._update_hook is not None:
self._conn_helper.set_update_hook(self._update_hook)
if self._replace_busy_handler:
timeout = self._timeout or 5
self._conn_helper.set_busy_handler(timeout * 1000)
def on_commit(self, fn):
self._commit_hook = fn
if not self.is_closed():
self._conn_helper.set_commit_hook(fn)
return fn
def on_rollback(self, fn):
self._rollback_hook = fn
if not self.is_closed():
self._conn_helper.set_rollback_hook(fn)
return fn
def on_update(self, fn):
self._update_hook = fn
if not self.is_closed():
self._conn_helper.set_update_hook(fn)
return fn
def changes(self):
return self._conn_helper.changes()
@property
def last_insert_rowid(self):
return self._conn_helper.last_insert_rowid()
@property
def autocommit(self):
return self._conn_helper.autocommit()
def backup(self, destination, pages=None, name=None, progress=None):
return backup(self.connection(), destination.connection(),
pages=pages, name=name, progress=progress)
def backup_to_file(self, filename, pages=None, name=None,
progress=None):
return backup_to_file(self.connection(), filename, pages=pages,
name=name, progress=progress)
def blob_open(self, table, column, rowid, read_only=False):
return Blob(self, table, column, rowid, read_only)
# Status properties.
memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
# Connection status properties.
lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
True)
lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
True)
cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
#cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
# False, True)
schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
return Expression(lhs, OP.MATCH, rhs)
def _parse_match_info(buf):
# See http://sqlite.org/fts3.html#matchinfo
bufsize = len(buf) # Length in bytes.
return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]
def get_weights(ncol, raw_weights):
if not raw_weights:
return [1] * ncol
else:
weights = [0] * ncol
for i, weight in enumerate(raw_weights):
weights[i] = weight
return weights
# Ranking implementation, which parse matchinfo.
def rank(raw_match_info, *raw_weights):
# Handle match_info called w/default args 'pcx' - based on the example rank
# function http://sqlite.org/fts3.html#appendix_a
match_info = _parse_match_info(raw_match_info)
score = 0.0
p, c = match_info[:2]
weights = get_weights(c, raw_weights)
# matchinfo X value corresponds to, for each phrase in the search query, a
# list of 3 values for each column in the search table.
# So if we have a two-phrase search query and three columns of data, the
# following would be the layout:
# p0 : c0=[0, 1, 2], c1=[3, 4, 5], c2=[6, 7, 8]
# p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
for phrase_num in range(p):
phrase_info_idx = 2 + (phrase_num * c * 3)
for col_num in range(c):
weight = weights[col_num]
if not weight:
continue
col_idx = phrase_info_idx + (col_num * 3)
# The idea is that we count the number of times the phrase appears
# in this column of the current row, compared to how many times it
# appears in this column across all rows. The ratio of these values
# provides a rough way to score based on "high value" terms.
row_hits = match_info[col_idx]
all_rows_hits = match_info[col_idx + 1]
if row_hits > 0:
score += weight * (float(row_hits) / all_rows_hits)
return -score
# Okapi BM25 ranking implementation (FTS4 only).
def bm25(raw_match_info, *args):
"""
Usage:
# Format string *must* be pcnalx
# Second parameter to bm25 specifies the index of the column, on
# the table being queries.
bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank
"""
match_info = _parse_match_info(raw_match_info)
K = 1.2
B = 0.75
score = 0.0
P_O, C_O, N_O, A_O = range(4) # Offsets into the matchinfo buffer.
term_count = match_info[P_O] # n
col_count = match_info[C_O]
total_docs = match_info[N_O] # N
L_O = A_O + col_count
X_O = L_O + col_count
# Worked example of pcnalx for two columns and two phrases, 100 docs total.
# {
# p = 2
# c = 2
# n = 100
# a0 = 4 -- avg number of tokens for col0, e.g. title
# a1 = 40 -- avg number of tokens for col1, e.g. body
# l0 = 5 -- curr doc has 5 tokens in col0
# l1 = 30 -- curr doc has 30 tokens in col1
#
# x000 -- hits this row for phrase0, col0
# x001 -- hits all rows for phrase0, col0
# x002 -- rows with phrase0 in col0 at least once
#
# x010 -- hits this row for phrase0, col1
# x011 -- hits all rows for phrase0, col1
# x012 -- rows with phrase0 in col1 at least once
#
# x100 -- hits this row for phrase1, col0
# x101 -- hits all rows for phrase1, col0
# x102 -- rows with phrase1 in col0 at least once
#
# x110 -- hits this row for phrase1, col1
# x111 -- hits all rows for phrase1, col1
# x112 -- rows with phrase1 in col1 at least once
# }
weights = get_weights(col_count, args)
for i in range(term_count):
for j in range(col_count):
weight = weights[j]
if weight == 0:
continue
x = X_O + (3 * (j + i * col_count))
term_frequency = float(match_info[x]) # f(qi, D)
docs_with_term = float(match_info[x + 2]) # n(qi)
# log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
idf = math.log(
(total_docs - docs_with_term + 0.5) /
(docs_with_term + 0.5))
if idf <= 0.0:
idf = 1e-6
doc_length = float(match_info[L_O + j]) # |D|
avg_length = float(match_info[A_O + j]) or 1. # avgdl
ratio = doc_length / avg_length
num = term_frequency * (K + 1.0)
b_part = 1.0 - B + (B * ratio)
denom = term_frequency + (K * b_part)
pc_score = idf * (num / denom)
score += (pc_score * weight)
return -score
def _json_contains(src_json, obj_json):
stack = []
try:
stack.append((json.loads(obj_json), json.loads(src_json)))
except:
# Invalid JSON!
return False
while stack:
obj, src = stack.pop()
if isinstance(src, dict):
if isinstance(obj, dict):
for key in obj:
if key not in src:
return False
stack.append((obj[key], src[key]))
elif isinstance(obj, list):
for item in obj:
if item not in src:
return False
elif obj not in src:
return False
elif isinstance(src, list):
if isinstance(obj, dict):
return False
elif isinstance(obj, list):
try:
for i in range(len(obj)):
stack.append((obj[i], src[i]))
except IndexError:
return False
elif obj not in src:
return False
elif obj != src:
return False
return True
| SqliteExtDatabase |
python | pytorch__pytorch | torch/ao/quantization/fx/graph_module.py | {
"start": 4541,
"end": 6607
} | class ____(GraphModule):
"""This class is created to make sure PackedParams
(e.g. LinearPackedParams, Conv2dPackedParams) to appear in state_dict
so that we can serialize and deserialize quantized graph module with
torch.save(m.state_dict()) and m.load_state_dict(state_dict)
"""
def __init__(
self,
root: torch.nn.Module | dict[str, Any],
graph: Graph,
preserved_attr_names: set[str],
):
self.preserved_attr_names = preserved_attr_names
preserved_attrs = {
attr: getattr(root, attr)
for attr in self.preserved_attr_names
if hasattr(root, attr)
}
super().__init__(root, graph)
for attr in preserved_attrs:
setattr(self, attr, preserved_attrs[attr])
self._register_state_dict_hook(_save_packed_weight)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
attrs_to_pop = []
for attr_name in state_dict:
if attr_name.startswith("_packed_weight") and isinstance(
state_dict[attr_name], torch._C.ScriptObject
): # type: ignore[attr-defined] # noqa: B950
setattr(self, attr_name, state_dict[attr_name])
attrs_to_pop.append(attr_name)
# pop the packed param attributesn
for attr_name in attrs_to_pop:
state_dict.pop(attr_name)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return QuantizedGraphModule(
fake_mod,
copy.deepcopy(self.graph),
copy.deepcopy(self.preserved_attr_names),
)
| QuantizedGraphModule |
python | ray-project__ray | python/ray/tests/test_autoscaler_gcp.py | {
"start": 114,
"end": 2084
} | class ____:
def __init__(self, errors: List[type]):
# List off errors to raise while retrying self.mock_method
self.errors = errors
# Incremented during each retry via self._construct_client
self.error_index = -1
# Mirrors the __init__ of GCPNodeProvider
# Also called during each retry in _retry
self._construct_clients()
def _construct_clients(self):
# In real life, called during each retry to reinitializes api clients.
# Here, increments index in list of errors passed into test.
self.error_index += 1
@_retry
def mock_method(self, *args, **kwargs):
error = self.errors[self.error_index]
if error:
raise error
return (args, kwargs)
# Short names for two types of errors
B, V = BrokenPipeError, ValueError
# BrokenPipeError is supposed to caught with up to 5 tries.
# ValueError is an arbitrarily chosen exception which should not be caught.
@pytest.mark.parametrize(
"error_input,expected_error_raised",
[
([None], None),
([B, B, B, B, None], None),
([B, B, V, B, None], V),
([B, B, B, B, B, None], B),
([B, B, B, B, B, B, None], B),
],
)
def test_gcp_broken_pipe_retry(error_input, expected_error_raised):
"""Tests retries of BrokenPipeError in GCPNodeProvider.
Args:
error_input: List of exceptions hit during retries of test mock_method.
None means no exception.
expected_error_raised: Expected exception raised.
None means no exception.
"""
provider = MockGCPNodeProvider(error_input)
if expected_error_raised:
with pytest.raises(expected_error_raised):
provider.mock_method(1, 2, a=4, b=5)
else:
ret = provider.mock_method(1, 2, a=4, b=5)
assert ret == ((1, 2), {"a": 4, "b": 5})
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| MockGCPNodeProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/connectors/asyncio.py | {
"start": 1782,
"end": 3042
} | class ____(Protocol):
"""protocol representing an async adapted version
of a :pep:`249` database cursor.
"""
def __aenter__(self) -> Any: ...
@property
def description(
self,
) -> _DBAPICursorDescription:
"""The description attribute of the Cursor."""
...
@property
def rowcount(self) -> int: ...
arraysize: int
lastrowid: int
async def close(self) -> None: ...
async def execute(
self,
operation: Any,
parameters: Optional[_DBAPISingleExecuteParams] = None,
) -> Any: ...
async def executemany(
self,
operation: Any,
parameters: _DBAPIMultiExecuteParams,
) -> Any: ...
async def fetchone(self) -> Optional[Any]: ...
async def fetchmany(self, size: Optional[int] = ...) -> Sequence[Any]: ...
async def fetchall(self) -> Sequence[Any]: ...
async def setinputsizes(self, sizes: Sequence[Any]) -> None: ...
def setoutputsize(self, size: Any, column: Any) -> None: ...
async def callproc(
self, procname: str, parameters: Sequence[Any] = ...
) -> Any: ...
async def nextset(self) -> Optional[bool]: ...
def __aiter__(self) -> AsyncIterator[Any]: ...
| AsyncIODBAPICursor |
python | pandas-dev__pandas | pandas/tests/plotting/test_hist_method.py | {
"start": 797,
"end": 8982
} | class ____:
@pytest.mark.parametrize("kwargs", [{}, {"grid": False}, {"figsize": (8, 10)}])
def test_hist_legacy_kwargs(self, ts, kwargs):
_check_plot_works(ts.hist, **kwargs)
@pytest.mark.parametrize("kwargs", [{}, {"bins": 5}])
def test_hist_legacy_kwargs_warning(self, ts, kwargs):
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
_check_plot_works(ts.hist, by=ts.index.month, **kwargs)
def test_hist_legacy_ax(self, ts):
fig, ax = mpl.pyplot.subplots(1, 1)
_check_plot_works(ts.hist, ax=ax, default_axes=True)
def test_hist_legacy_ax_and_fig(self, ts):
fig, ax = mpl.pyplot.subplots(1, 1)
_check_plot_works(ts.hist, ax=ax, figure=fig, default_axes=True)
def test_hist_legacy_fig(self, ts):
fig, _ = mpl.pyplot.subplots(1, 1)
_check_plot_works(ts.hist, figure=fig, default_axes=True)
def test_hist_legacy_multi_ax(self, ts):
fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2)
_check_plot_works(ts.hist, figure=fig, ax=ax1, default_axes=True)
_check_plot_works(ts.hist, figure=fig, ax=ax2, default_axes=True)
def test_hist_legacy_by_fig_error(self, ts):
fig, _ = mpl.pyplot.subplots(1, 1)
msg = (
"Cannot pass 'figure' when using the 'by' argument, since a new 'Figure' "
"instance will be created"
)
with pytest.raises(ValueError, match=msg):
ts.hist(by=ts.index, figure=fig)
def test_hist_bins_legacy(self):
df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
def test_hist_layout(self, hist_df):
df = hist_df
msg = "The 'layout' keyword is not supported when 'by' is None"
with pytest.raises(ValueError, match=msg):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError, match=msg):
df.height.hist(layout=[1, 1])
@pytest.mark.slow
@pytest.mark.parametrize(
"by, layout, axes_num, res_layout",
[
["gender", (2, 1), 2, (2, 1)],
["gender", (3, -1), 2, (3, 1)],
["category", (4, 1), 4, (4, 1)],
["category", (2, -1), 4, (2, 2)],
["category", (3, -1), 4, (3, 2)],
["category", (-1, 4), 4, (1, 4)],
["classroom", (2, 2), 3, (2, 2)],
],
)
def test_hist_layout_with_by(self, hist_df, by, layout, axes_num, res_layout):
df = hist_df
# _check_plot_works adds an `ax` kwarg to the method call
# so we get a warning about an axis being cleared, even
# though we don't explicitly pass one, see GH #13188
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(df.height.hist, by=getattr(df, by), layout=layout)
_check_axes_shape(axes, axes_num=axes_num, layout=res_layout)
def test_hist_layout_with_by_shape(self, hist_df):
df = hist_df
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
_check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
def test_hist_no_overlap(self):
x = Series(np.random.default_rng(2).standard_normal(2))
y = Series(np.random.default_rng(2).standard_normal(2))
plt.subplot(121)
x.hist()
plt.subplot(122)
y.hist()
fig = plt.gcf()
axes = fig.axes
assert len(axes) == 2
def test_hist_by_no_extra_plots(self, hist_df):
df = hist_df
df.height.hist(by=df.gender)
assert len(mpl.pyplot.get_fignums()) == 1
def test_plot_fails_when_ax_differs_from_figure(self, ts):
fig1 = plt.figure(1)
fig2 = plt.figure(2)
ax1 = fig1.add_subplot(111)
msg = "passed axis not bound to passed figure"
with pytest.raises(AssertionError, match=msg):
ts.hist(ax=ax1, figure=fig2)
@pytest.mark.parametrize(
"histtype, expected",
[
("bar", True),
("barstacked", True),
("step", False),
("stepfilled", True),
],
)
def test_histtype_argument(self, histtype, expected):
# GH23992 Verify functioning of histtype argument
ser = Series(np.random.default_rng(2).integers(1, 10))
ax = ser.hist(histtype=histtype)
_check_patches_all_filled(ax, filled=expected)
@pytest.mark.parametrize(
"by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))]
)
def test_hist_with_legend(self, by, expected_axes_num, expected_layout):
# GH 6279 - Series histogram can have a legend
index = 5 * ["1"] + 5 * ["2"]
s = Series(np.random.default_rng(2).standard_normal(10), index=index, name="a")
s.index.name = "b"
# Use default_axes=True when plotting method generate subplots itself
axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by)
_check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
_check_legend_labels(axes, "a")
@pytest.mark.parametrize("by", [None, "b"])
def test_hist_with_legend_raises(self, by):
# GH 6279 - Series histogram with legend and label raises
index = 5 * ["1"] + 5 * ["2"]
s = Series(np.random.default_rng(2).standard_normal(10), index=index, name="a")
s.index.name = "b"
with pytest.raises(ValueError, match="Cannot use both legend and label"):
s.hist(legend=True, by=by, label="c")
def test_hist_kwargs(self, ts):
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 5
_check_text_labels(ax.yaxis.get_label(), "Frequency")
def test_hist_kwargs_horizontal(self, ts):
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(bins=5, ax=ax)
ax = ts.plot.hist(orientation="horizontal", ax=ax)
_check_text_labels(ax.xaxis.get_label(), "Frequency")
def test_hist_kwargs_align(self, ts):
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(bins=5, ax=ax)
ax = ts.plot.hist(align="left", stacked=True, ax=ax)
@pytest.mark.xfail(reason="Api changed in 3.6.0")
def test_hist_kde(self, ts):
pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
_check_text_labels(xlabels, [""] * len(xlabels))
ylabels = ax.get_yticklabels()
_check_text_labels(ylabels, [""] * len(ylabels))
def test_hist_kde_plot_works(self, ts):
pytest.importorskip("scipy")
_check_plot_works(ts.plot.kde)
def test_hist_kde_density_works(self, ts):
pytest.importorskip("scipy")
_check_plot_works(ts.plot.density)
@pytest.mark.xfail(reason="Api changed in 3.6.0")
def test_hist_kde_logy(self, ts):
pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, ax=ax)
_check_ax_scales(ax, yaxis="log")
xlabels = ax.get_xticklabels()
_check_text_labels(xlabels, [""] * len(xlabels))
ylabels = ax.get_yticklabels()
_check_text_labels(ylabels, [""] * len(ylabels))
def test_hist_kde_color_bins(self, ts):
pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax)
_check_ax_scales(ax, yaxis="log")
assert len(ax.patches) == 10
_check_colors(ax.patches, facecolors=["b"] * 10)
def test_hist_kde_color(self, ts):
pytest.importorskip("scipy")
_, ax = mpl.pyplot.subplots()
ax = ts.plot.kde(logy=True, color="r", ax=ax)
_check_ax_scales(ax, yaxis="log")
lines = ax.get_lines()
assert len(lines) == 1
_check_colors(lines, ["r"])
| TestSeriesPlots |
python | getsentry__sentry | tests/acceptance/test_project_tags_settings.py | {
"start": 348,
"end": 1905
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.login_as(self.user)
self.path = f"/settings/{self.org.slug}/projects/{self.project.slug}/tags/"
@patch("django.utils.timezone.now", return_value=current_time)
def test_tags_list(self, mock_timezone: MagicMock) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"level": "error",
"timestamp": event_time.isoformat(),
},
project_id=self.project.id,
assert_no_errors=False,
)
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_test_id("tag-row")
self.browser.click('[data-test-id="tag-row"] [data-test-id="delete"]')
self.browser.wait_until("[role='dialog'] [data-test-id='confirm-button']")
self.browser.click("[role='dialog'] [data-test-id='confirm-button']")
self.browser.wait_until_not('[data-test-id="tag-row"]')
| ProjectTagsSettingsTest |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 16955,
"end": 17137
} | class ____(PydanticValueError):
code = 'discriminated_union.missing_discriminator'
msg_template = 'Discriminator {discriminator_key!r} is missing in value'
| MissingDiscriminator |
python | facebook__pyre-check | pyre_extensions/generic.py | {
"start": 303,
"end": 980
} | class ____(type):
def __getitem__(cls, *args) -> Any:
return cls.__class__(cls.__name__, cls.__bases__, dict(cls.__dict__))
if sys.version_info >= (3, 7):
class Generic:
"""Pyre's variadic-supporting substitute for `typing.Generic`.
By using `__class_getitem__`, this avoids a metaclass, which prevents
ugly metaclass conflicts when a child class is generic and a base class
has some metaclass."""
def __class_getitem__(cls, *args: object) -> Any:
return cls
else:
class Generic(metaclass=GenericMeta):
"""Pyre's variadic-supporting substitute for `typing.Generic`."""
pass
| GenericMeta |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"start": 5449,
"end": 16952
} | class ____(CreateEventTestCase):
interval = "1h"
comparison_interval = "15m"
def create_events(self, comparison_type: ComparisonType) -> Event:
# Create current events for the first query
event = self.create_event(self.project.id, FROZEN_TIME, "group-1", self.environment.name)
self.create_event(self.project.id, FROZEN_TIME, "group-1", self.environment.name)
if comparison_type == ComparisonType.PERCENT:
# Create a past event for the second query
self.create_event(
self.project.id,
FROZEN_TIME - timedelta(hours=1, minutes=10),
"group-1",
self.environment.name,
)
return event
def create_condition_groups(
self, condition_data_list: Sequence[EventFrequencyConditionData]
) -> tuple[dict[UniqueConditionQuery, DataAndGroups], int, list[UniqueConditionQuery]]:
condition_groups = {}
all_unique_queries = []
for condition_data in condition_data_list:
unique_queries = generate_unique_queries(condition_data, self.environment.id)
event = self.create_events(condition_data["comparisonType"])
assert event.group
group = event.group
data_and_groups = DataAndGroups(
data=condition_data,
group_ids={event.group.id},
)
condition_groups.update({query: data_and_groups for query in unique_queries})
all_unique_queries.extend(unique_queries)
return condition_groups, group.id, all_unique_queries
def test_empty_condition_groups(self) -> None:
assert get_condition_group_results({}, self.project) == {}
@patch("sentry.rules.processing.delayed_processing.logger")
def test_nonexistent_condition(self, mock_logger: MagicMock) -> None:
nonexistent_cond_query = UniqueConditionQuery(
cls_id="fake_id", interval="", environment_id=1
)
fake_data_groups = DataAndGroups(
data=self.create_event_frequency_condition(id="fake_id"),
group_ids={1},
)
results = get_condition_group_results(
{nonexistent_cond_query: fake_data_groups}, self.project
)
assert results == {}
mock_logger.warning.assert_called_once()
@patch("sentry.rules.processing.delayed_processing.logger")
def test_fast_condition(self, mock_logger: MagicMock) -> None:
fast_cond_query = UniqueConditionQuery(
cls_id="sentry.rules.conditions.every_event.EveryEventCondition",
interval="",
environment_id=1,
)
fake_data_groups = DataAndGroups(
data=self.create_event_frequency_condition(id="fake_id"),
group_ids={1},
)
results = get_condition_group_results({fast_cond_query: fake_data_groups}, self.project)
assert results == {}
mock_logger.warning.assert_called_once()
def test_group_does_not_belong_to_project(self) -> None:
"""
Test that when the passed in project does not contain the group
referenced in condition_data, the function ignores this mismatch
entirely and still queries for those events.
"""
condition_data = self.create_event_frequency_condition(interval=self.interval)
condition_groups, group_id, unique_queries = self.create_condition_groups([condition_data])
results = get_condition_group_results(condition_groups, self.create_project())
assert results == {
unique_queries[0]: {group_id: 2},
}
def test_count_comparison_condition(self) -> None:
condition_data = self.create_event_frequency_condition(interval=self.interval)
condition_groups, group_id, unique_queries = self.create_condition_groups([condition_data])
results = get_condition_group_results(condition_groups, self.project)
assert results == {
unique_queries[0]: {group_id: 2},
}
def test_count_comparison_condition_with_upsampling(self) -> None:
"""Test that EventFrequencyCondition uses upsampled counts when upsampling is enabled"""
def create_events_with_sampling(comparison_type: ComparisonType) -> Event:
# Create current events with sample weights for the first query
event = self.create_event(
self.project.id,
FROZEN_TIME,
"group-1", # Use same fingerprint as original create_events
self.environment.name,
contexts={"error_sampling": {"client_sample_rate": 0.2}},
)
self.create_event(
self.project.id,
FROZEN_TIME,
"group-1", # Use same fingerprint as original create_events
self.environment.name,
contexts={
"error_sampling": {"client_sample_rate": 0.2}
}, # 1/5 sampling = 5x weight
)
if comparison_type == ComparisonType.PERCENT:
# Create a past event for the second query
self.create_event(
self.project.id,
FROZEN_TIME - timedelta(hours=1, minutes=10),
"group-1",
self.environment.name,
contexts={
"error_sampling": {"client_sample_rate": 0.2}
}, # 1/5 sampling = 5x weight
)
return event
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
with patch.object(self, "create_events", side_effect=create_events_with_sampling):
condition_data = self.create_event_frequency_condition(interval=self.interval)
condition_groups, group_id, unique_queries = self.create_condition_groups(
[condition_data]
)
results = get_condition_group_results(condition_groups, self.project)
# Expect upsampled count: 2 events * 5 sample_weight = 10
assert results == {
unique_queries[0]: {group_id: 10},
}
def test_percent_comparison_condition(self) -> None:
condition_data = self.create_event_frequency_condition(
interval=self.interval,
comparison_type=ComparisonType.PERCENT,
comparison_interval=self.comparison_interval,
)
condition_groups, group_id, unique_queries = self.create_condition_groups([condition_data])
results = get_condition_group_results(condition_groups, self.project)
present_percent_query, offset_percent_query = unique_queries
assert results == {
present_percent_query: {group_id: 2},
offset_percent_query: {group_id: 1},
}
def test_percent_comparison_condition_with_upsampling(self) -> None:
"""Test that EventFrequencyCondition uses upsampled counts for percent comparison when upsampling is enabled"""
def create_events_with_sampling(comparison_type: ComparisonType) -> Event:
# Create current events with sample weights for the first query
event = self.create_event(
self.project.id,
FROZEN_TIME,
"group-1", # Use same fingerprint as original create_events
self.environment.name,
contexts={
"error_sampling": {"client_sample_rate": 0.2}
}, # 1/5 sampling = 5x weight
)
self.create_event(
self.project.id,
FROZEN_TIME,
"group-1", # Use same fingerprint as original create_events
self.environment.name,
contexts={
"error_sampling": {"client_sample_rate": 0.2}
}, # 1/5 sampling = 5x weight
)
if comparison_type == ComparisonType.PERCENT:
# Create a past event for the second query with sample weights
self.create_event(
self.project.id,
FROZEN_TIME - timedelta(hours=1, minutes=10),
"group-1",
self.environment.name,
contexts={
"error_sampling": {"client_sample_rate": 0.2}
}, # 1/5 sampling = 5x weight
)
return event
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
with patch.object(self, "create_events", side_effect=create_events_with_sampling):
condition_data = self.create_event_frequency_condition(
interval=self.interval,
comparison_type=ComparisonType.PERCENT,
comparison_interval=self.comparison_interval,
)
condition_groups, group_id, unique_queries = self.create_condition_groups(
[condition_data]
)
results = get_condition_group_results(condition_groups, self.project)
present_percent_query, offset_percent_query = unique_queries
# Expect upsampled counts:
# Present period: 2 events * 5 sample_weight = 10
# Offset period: 1 event * 5 sample_weight = 5
assert results == {
present_percent_query: {group_id: 10},
offset_percent_query: {group_id: 5},
}
def test_count_percent_nonexistent_fast_conditions_together(self) -> None:
"""
Test that a percent and count condition are processed as expected, and
that nonexistent and fast conditions are ignored.
"""
count_data = self.create_event_frequency_condition(interval=self.interval)
percent_data = self.create_event_frequency_condition(
interval=self.interval,
comparison_type=ComparisonType.PERCENT,
comparison_interval=self.comparison_interval,
)
condition_groups, group_id, unique_queries = self.create_condition_groups(
[count_data, percent_data]
)
nonexistent_cond_query = UniqueConditionQuery(
cls_id="fake_id", interval="", environment_id=1
)
fast_cond_query = UniqueConditionQuery(
cls_id="sentry.rules.conditions.every_event.EveryEventCondition",
interval="",
environment_id=1,
)
fake_data_groups = DataAndGroups(
data=self.create_event_frequency_condition(id="fake_id"),
group_ids={1},
)
condition_groups.update(
{nonexistent_cond_query: fake_data_groups, fast_cond_query: fake_data_groups}
)
results = get_condition_group_results(condition_groups, self.project)
count_query, present_percent_query, offset_percent_query = unique_queries
# The count query and first percent query should be identical
assert count_query == present_percent_query
# We should only query twice b/c the count query and first percent query
# share a single scan.
assert results == {
count_query: {group_id: 4},
offset_percent_query: {group_id: 1},
}
| GetConditionGroupResultsTest |
python | django__django | tests/migrations/test_writer.py | {
"start": 1826,
"end": 1895
} | class ____(enum.Enum):
A = b"a-value"
B = b"value-b"
| BinaryEnum |
python | pennersr__django-allauth | allauth/headless/mfa/views.py | {
"start": 8240,
"end": 9763
} | class ____(SignupView):
input_class = {
"POST": SignupWebAuthnInput,
"PUT": CreateWebAuthnInput,
}
by_passkey = True
def get(self, request, *args, **kwargs):
resp = self._require_stage()
if resp:
return resp
creation_options = webauthn_flows.begin_registration(
request, self.stage.login.user, passwordless=True, signup=True
)
return response.AddWebAuthnResponse(request, creation_options)
def _prep_stage(self):
if hasattr(self, "stage"):
return self.stage
self.stage = get_pending_stage(self.request)
return self.stage
def _require_stage(self):
self._prep_stage()
if not self.stage or self.stage.key != PasskeySignupStage.key:
return ConflictResponse(self.request)
return None
def get_input_kwargs(self):
ret = super().get_input_kwargs()
self._prep_stage()
if self.stage and self.request.method == "PUT":
ret["user"] = self.stage.login.user
return ret
def put(self, request, *args, **kwargs):
resp = self._require_stage()
if resp:
return resp
webauthn_flows.signup_authenticator(
request,
user=self.stage.login.user,
name=self.input.cleaned_data["name"],
credential=self.input.cleaned_data["credential"],
)
self.stage.exit()
return AuthenticationResponse(request)
| SignupWebAuthnView |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/translator.py | {
"start": 7131,
"end": 8364
} | class ____:
"""Translator class which converts a `AirbyteConnectionTableProps` object into AssetSpecs.
Subclass this class to implement custom logic how to translate Airbyte content into asset spec.
"""
def get_asset_spec(self, props: AirbyteConnectionTableProps) -> AssetSpec:
"""Get the AssetSpec for a table synced by an Airbyte connection."""
table_schema_props = (
props.json_schema.get("properties")
or props.json_schema.get("items", {}).get("properties")
or {}
)
column_schema = generate_table_schema(table_schema_props)
metadata = {
**TableMetadataSet(
column_schema=column_schema,
table_name=props.fully_qualified_table_name,
),
**AirbyteMetadataSet(
connection_id=props.connection_id,
connection_name=props.connection_name,
stream_prefix=props.stream_prefix,
),
}
return AssetSpec(
key=AssetKey(props.table_name),
metadata=metadata,
kinds={"airbyte", *({props.destination_type} if props.destination_type else set())},
)
| DagsterAirbyteTranslator |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/generic_utils.py | {
"start": 4491,
"end": 5175
} | class ____(object):
"""A context manager for disabling handling of shared objects.
Disables shared object handling for both saving and loading.
Created primarily for use with `clone_model`, which does extra surgery that
is incompatible with shared objects.
"""
def __enter__(self):
SHARED_OBJECT_DISABLED.disabled = True
self._orig_loading_scope = _shared_object_loading_scope()
self._orig_saving_scope = _shared_object_saving_scope()
def __exit__(self, *args, **kwargs):
SHARED_OBJECT_DISABLED.disabled = False
SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
| DisableSharedObjectScope |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 28065,
"end": 28218
} | class ____(greentest.TestCase):
def test_killall_raw(self):
g = gevent.spawn_raw(lambda: 1)
gevent.killall([g])
| TestKillallRawGreenlet |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/index_flat_map_test.py | {
"start": 7936,
"end": 11562
} | class ____(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
repetitions=[1, 3],
symbolic_checkpoint=[True, False])))
def test_index_flat_map(
self,
verify_fn: Callable[..., None],
repetitions: int,
symbolic_checkpoint: bool):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(
dataset, _split, _get_index_map_func(_get_metadata(input_data)))
if repetitions > 1:
dataset = dataset.repeat(repetitions)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(self, _build_dataset, num_outputs=9 * repetitions)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
repetitions=[1, 3],
reshuffle_each_iteration=[True, False],
symbolic_checkpoint=[True, False])))
def test_global_shuffle(
self,
verify_fn: Callable[..., None],
repetitions: list[int],
reshuffle_each_iteration: bool,
symbolic_checkpoint: bool):
input_data = ["0 1", "2 3 4 5", "6 7", "8"]
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.from_tensor_slices(input_data)
dataset = index_flat_map_op.index_flat_map(
dataset,
_split,
_get_index_map_func(_get_metadata(input_data)),
output_cardinality=9)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(
self,
_build_dataset,
num_outputs=9 * repetitions,
assert_items_equal=reshuffle_each_iteration)
def _split(element: str) -> tensor.Tensor:
return ragged_string_ops.string_split_v2(element, " ")
def _get_metadata(input_data: list[str]) -> tensor.Tensor:
"""Given a list of strings, creates a metadata matrix."""
metadata = []
for i, data in enumerate(input_data):
split_data = data.split()
last_index = metadata[-1][1] if metadata else 0
metadata.append((last_index, last_index + len(split_data), i))
return constant_op.constant(metadata, dtype=dtypes.int64)
def _get_index_map_func(
metadata: tensor.Tensor) -> Callable[[int], tuple[int, int]]:
"""Turns a `metadata` Tensor into an index map function."""
def _index_map_func(index: Union[int, tensor.Tensor]) -> tuple[int, int]:
element_index = 0
while (element_index < metadata.shape[0] and
index >= array_ops.gather_nd(metadata, [element_index, 1])):
element_index += 1
offset = (
index - array_ops.gather_nd(metadata, [element_index, 0])
if element_index < metadata.shape[0]
else constant_op.constant(0, dtype=dtypes.int64))
return (element_index, offset)
return _index_map_func
if __name__ == "__main__":
test.main()
| IndexFlatMapCheckpointTest |
python | dask__dask | dask/dataframe/io/parquet/core.py | {
"start": 620,
"end": 2739
} | class ____(DataFrameIOFunction):
"""
Parquet Function-Wrapper Class
Reads parquet data from disk to produce a partition
(given a `part` argument).
"""
def __init__(
self,
engine,
fs,
meta,
columns,
index,
dtype_backend,
kwargs,
common_kwargs,
):
self.engine = engine
self.fs = fs
self.meta = meta
self._columns = columns
self.index = index
self.dtype_backend = dtype_backend
# `kwargs` = user-defined kwargs to be passed
# identically for all partitions.
#
# `common_kwargs` = kwargs set by engine to be
# passed identically for all
# partitions.
self.common_kwargs = toolz.merge(common_kwargs, kwargs or {})
@property
def columns(self):
return self._columns
def project_columns(self, columns):
"""Return a new ParquetFunctionWrapper object
with a sub-column projection.
"""
if columns == self.columns:
return self
return ParquetFunctionWrapper(
self.engine,
self.fs,
self.meta,
columns,
self.index,
self.dtype_backend,
None, # Already merged into common_kwargs
self.common_kwargs,
)
def __call__(self, part):
if not isinstance(part, list):
part = [part]
return read_parquet_part(
self.fs,
self.engine,
self.meta,
[
# Temporary workaround for HLG serialization bug
# (see: https://github.com/dask/dask/issues/8581)
(
(p.data["piece"], p.data.get("kwargs", {}))
if hasattr(p, "data")
else (p["piece"], p.get("kwargs", {}))
)
for p in part
],
self.columns,
self.index,
self.common_kwargs,
)
| ParquetFunctionWrapper |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1574435,
"end": 1574658
} | class ____(VegaLiteSchema):
"""Vector2Vector2number schema wrapper."""
_schema = {"$ref": "#/definitions/Vector2<Vector2<number>>"}
def __init__(self, *args):
super().__init__(*args)
| Vector2Vector2number |
python | kamyu104__LeetCode-Solutions | Python/collecting-chocolates.py | {
"start": 2475,
"end": 2901
} | class ____(object):
def minCost(self, nums, x):
"""
:type nums: List[int]
:type x: int
:rtype: int
"""
result = [x*k for k in xrange(len(nums)+1)]
for i in xrange(len(nums)):
curr = nums[i]
for k in xrange(len(result)):
curr = min(curr, nums[(i+k)%len(nums)])
result[k] += curr
return min(result)
| Solution3 |
python | astropy__astropy | astropy/io/ascii/tdat.py | {
"start": 1546,
"end": 5364
} | class ____(core.BaseSplitter):
"""Splitter for tdat data.
Handles the (deprecated) cases of multiple data delimiters, record
delimiters, and multi-line records.
Multiple data delimiters - Multiple delimiters can be specified in the
header, e.g. field_delimiter = "|!" would treat both | and !
as individual delimiters. Default: "|"
Record Delimiters - The record_delimiter can be specified in the header. By
default there is no record delimiter and new records should be set on
new lines. The following list standard escaped character sequences and their
equivalent meanings can be used:
* \t (tab)
* \b (backspace)
* \r (carriage return)
* \f (form feed)
* \v (vertical tab)
* \a (audible alert/bell),
* \\### (where ### is a number between 1 and 127 and represents the
ASCII character with that numerical code).
Note: Specifying a record delimiter value of "" is interpreted as a
single blank line between records.
Multi-line records - A single record may take more than one line, indicated in the header
"""
delimiter = "|"
record_delimiter = None
@property
def literals_dict(self):
"""Return a dictionary of placeholders to be used in place of
backslashed delimiter characters.
"""
return {c: f"literal{n}" for n, c in enumerate(self.delimiter)}
def preprocess_data_lines(self, lines, record_delimiter):
"""Split lines into multiple lines if a record_delimiter is specified."""
data_lines = []
for line in lines:
data_lines += re.split(rf"{record_delimiter}", line)
return data_lines
def process_line(self, line: str) -> str:
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
READ: override default to handle backslashed delimiter characters.
"""
for c in self.delimiter:
line = line.replace(f"\\{c}", self.literals_dict[c])
line = re.sub(rf"[{self.delimiter}]", "|", line)
return line.strip()
def __call__(self, lines, field_delimiter="|", record_delimiter=None, nlines=1):
""" """
self.delimiter = field_delimiter
def replace_placeholders(line):
for c in field_delimiter:
line = line.replace(self.literals_dict[c], c)
return line
if " " in field_delimiter:
warn(
TdatFormatWarning("Double check your data when using space delimiters.")
)
if record_delimiter is not None:
lines = self.preprocess_data_lines(lines, record_delimiter)
if hasattr(self, "process_line"):
lines = (self.process_line(x) for x in lines)
iline = 0
_lines = []
for line in lines:
_lines.append(line)
iline += 1
if iline == nlines:
vals = [
replace_placeholders(val)
for _line in _lines
for val in re.split(r"\|", _line)[:-1]
]
# Reset
iline = 0
_lines = []
if hasattr(self, "process_val"):
yield [self.process_val(x) for x in vals]
else: # pragma: no cover
yield vals
else: # pragma: no cover
continue
def join(self, vals):
delimiter = getattr(self, "delimiter", "|")
# TDAT specification requires a delimiter at the end of each record
return delimiter.join(str(x) for x in vals) + delimiter
| TdatDataSplitter |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/tests/test_ansi_code_processor.py | {
"start": 143,
"end": 9930
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AnsiCodeProcessor()
self.qt_processor = QtAnsiCodeProcessor()
def test_clear(self):
""" Do control sequences for clearing the console work?
"""
string = '\x1b[2J\x1b[K'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'erase')
self.assertEqual(action.area, 'screen')
self.assertEqual(action.erase_to, 'all')
elif i == 1:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'erase')
self.assertEqual(action.area, 'line')
self.assertEqual(action.erase_to, 'end')
else:
self.fail('Too many substrings.')
self.assertEqual(i, 1, 'Too few substrings.')
#test_erase_in_line() is in test_00_console_widget.py, because it needs the console
def test_colors(self):
""" Do basic controls sequences for colors work?
"""
string = 'first\x1b[34mblue\x1b[0mlast\033[33mYellow'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEqual(substring, 'first')
self.assertEqual(self.processor.foreground_color, None)
elif i == 1:
self.assertEqual(substring, 'blue')
self.assertEqual(self.processor.foreground_color, 4)
elif i == 2:
self.assertEqual(substring, 'last')
self.assertEqual(self.processor.foreground_color, None)
elif i == 3:
foreground_color = self.processor.foreground_color
self.assertEqual(substring, 'Yellow')
self.assertEqual(foreground_color, 3)
self.assertEqual(self.qt_processor.get_color(foreground_color).name(), '#ffd700')
else:
self.fail('Too many substrings.')
self.assertEqual(i, 3, 'Too few substrings.')
def test_colors_xterm(self):
""" Do xterm-specific control sequences for colors work?
"""
string = '\x1b]4;20;rgb:ff/ff/ff\x1b' \
'\x1b]4;25;rgbi:1.0/1.0/1.0\x1b'
substrings = list(self.processor.split_string(string))
desired = { 20 : (255, 255, 255),
25 : (255, 255, 255) }
self.assertEqual(self.processor.color_map, desired)
string = '\x1b[38;5;20m\x1b[48;5;25m'
substrings = list(self.processor.split_string(string))
self.assertEqual(self.processor.foreground_color, 20)
self.assertEqual(self.processor.background_color, 25)
def test_true_color(self):
"""Do 24bit True Color control sequences?
"""
string = '\x1b[38;2;255;100;0m\x1b[48;2;100;100;100m'
substrings = list(self.processor.split_string(string))
self.assertEqual(self.processor.foreground_color, [255, 100, 0])
self.assertEqual(self.processor.background_color, [100, 100, 100])
def test_scroll(self):
""" Do control sequences for scrolling the buffer work?
"""
string = '\x1b[5S\x1b[T'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'scroll')
self.assertEqual(action.dir, 'up')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 5)
elif i == 1:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'scroll')
self.assertEqual(action.dir, 'down')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 1)
else:
self.fail('Too many substrings.')
self.assertEqual(i, 1, 'Too few substrings.')
def test_formfeed(self):
""" Are formfeed characters processed correctly?
"""
string = '\f' # form feed
self.assertEqual(list(self.processor.split_string(string)), [''])
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'scroll')
self.assertEqual(action.dir, 'down')
self.assertEqual(action.unit, 'page')
self.assertEqual(action.count, 1)
def test_carriage_return(self):
""" Are carriage return characters processed correctly?
"""
string = 'foo\rbar' # carriage return
splits = []
actions = []
for split in self.processor.split_string(string):
splits.append(split)
actions.append([action.action for action in self.processor.actions])
self.assertEqual(splits, ['foo', None, 'bar'])
self.assertEqual(actions, [[], ['carriage-return'], []])
def test_carriage_return_newline(self):
"""transform CRLF to LF"""
string = 'foo\rbar\r\ncat\r\n\n' # carriage return and newline
# only one CR action should occur, and '\r\n' should transform to '\n'
splits = []
actions = []
for split in self.processor.split_string(string):
splits.append(split)
actions.append([action.action for action in self.processor.actions])
self.assertEqual(splits, ['foo', None, 'bar', None, 'cat', None, None])
self.assertEqual(actions, [[], ['carriage-return'], [], ['newline'], [], ['newline'], ['newline']])
def test_beep(self):
""" Are beep characters processed correctly?
"""
string = 'foo\abar' # bell
splits = []
actions = []
for split in self.processor.split_string(string):
splits.append(split)
actions.append([action.action for action in self.processor.actions])
self.assertEqual(splits, ['foo', None, 'bar'])
self.assertEqual(actions, [[], ['beep'], []])
def test_backspace(self):
""" Are backspace characters processed correctly?
"""
string = 'foo\bbar' # backspace
splits = []
actions = []
for split in self.processor.split_string(string):
splits.append(split)
actions.append([action.action for action in self.processor.actions])
self.assertEqual(splits, ['foo', None, 'bar'])
self.assertEqual(actions, [[], ['backspace'], []])
def test_combined(self):
""" Are CR and BS characters processed correctly in combination?
BS is treated as a change in print position, rather than a
backwards character deletion. Therefore a BS at EOL is
effectively ignored.
"""
string = 'abc\rdef\b' # CR and backspace
splits = []
actions = []
for split in self.processor.split_string(string):
splits.append(split)
actions.append([action.action for action in self.processor.actions])
self.assertEqual(splits, ['abc', None, 'def', None])
self.assertEqual(actions, [[], ['carriage-return'], [], ['backspace']])
def test_move_cursor_up(self):
"""Are the ANSI commands for the cursor movement actions
(movement up and to the beginning of the line) processed correctly?
"""
# This line moves the cursor up once, then moves it up five more lines.
# Next, it moves the cursor to the beginning of the previous line, and
# finally moves it to the beginning of the fifth line above the current
# position
string = '\x1b[A\x1b[5A\x1b[F\x1b[5F'
i = -1
for i, substring in enumerate(self.processor.split_string(string)):
if i == 0:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'move')
self.assertEqual(action.dir, 'up')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 1)
elif i == 1:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'move')
self.assertEqual(action.dir, 'up')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 5)
elif i == 2:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'move')
self.assertEqual(action.dir, 'leftup')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 1)
elif i == 3:
self.assertEqual(len(self.processor.actions), 1)
action = self.processor.actions[0]
self.assertEqual(action.action, 'move')
self.assertEqual(action.dir, 'leftup')
self.assertEqual(action.unit, 'line')
self.assertEqual(action.count, 5)
else:
self.fail('Too many substrings.')
self.assertEqual(i, 3, 'Too few substrings.')
if __name__ == '__main__':
unittest.main()
| TestAnsiCodeProcessor |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 122671,
"end": 191852
} | class ____(unittest.TestCase):
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")
    client_context, server_context, hostname = testing_context()
    # Sane client/server role assignment must succeed.
    with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
        server_params_test(client_context=client_context,
                           server_context=server_context,
                           chatty=True, connectionchatty=True,
                           sni_name=hostname)
    client_context.check_hostname = False
    # Every combination that uses a PROTOCOL_TLS_SERVER context on the
    # client side must be rejected with a descriptive SSLError.
    with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
        with self.assertRaises(ssl.SSLError) as e:
            server_params_test(client_context=server_context,
                               server_context=client_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
        self.assertIn(
            'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
            str(e.exception)
        )
    with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
        with self.assertRaises(ssl.SSLError) as e:
            server_params_test(client_context=server_context,
                               server_context=server_context,
                               chatty=True, connectionchatty=True)
        self.assertIn(
            'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
            str(e.exception)
        )
    with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
        with self.assertRaises(ssl.SSLError) as e:
            server_params_test(client_context=server_context,
                               server_context=client_context,
                               chatty=True, connectionchatty=True)
        self.assertIn(
            'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
            str(e.exception))
def test_getpeercert(self):
    """getpeercert() is invalid before the handshake and afterwards
    returns a dict with subject and validity-period fields."""
    if support.verbose:
        sys.stdout.write("\n")
    client_context, server_context, hostname = testing_context()
    server = ThreadedEchoServer(context=server_context, chatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        do_handshake_on_connect=False,
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                    not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            # The validity window must be non-empty.
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            self.assertLess(before, after)
def test_crl_check(self):
    """VERIFY_CRL_CHECK_LEAF fails without a loaded CRL and succeeds
    once the CA-signed CRL file is loaded."""
    if support.verbose:
        sys.stdout.write("\n")
    client_context, server_context, hostname = testing_context()
    # VERIFY_X509_TRUSTED_FIRST may be absent on old OpenSSL builds.
    tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
    self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
    # VERIFY_DEFAULT should pass
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
    # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
    client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaisesRegex(ssl.SSLError,
                                        "certificate verify failed"):
                s.connect((HOST, server.port))
    # now load a CRL file. The CRL file is signed by the CA.
    client_context.load_verify_locations(CRLFILE)
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
    """check_hostname verifies a matching name, rejects a mismatch,
    and requires server_hostname to be supplied at all."""
    if support.verbose:
        sys.stdout.write("\n")
    client_context, server_context, hostname = testing_context()
    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
    # incorrect hostname should raise an exception
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname="invalid") as s:
            with self.assertRaisesRegex(
                    ssl.CertificateError,
                    "Hostname mismatch, certificate is not valid for 'invalid'."):
                s.connect((HOST, server.port))
    # missing server_hostname arg should cause an exception, too
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with socket.socket() as s:
            with self.assertRaisesRegex(ValueError,
                                        "check_hostname requires server_hostname"):
                client_context.wrap_socket(s)
@unittest.skipUnless(
    ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
    """Disabling hostname_checks_common_name still accepts a SAN cert,
    but rejects a cert that only carries a Common Name."""
    client_context, server_context, hostname = testing_context()
    assert client_context.hostname_checks_common_name
    client_context.hostname_checks_common_name = False
    # default cert has a SAN
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
    # Re-create the contexts with a certificate lacking a SAN entry;
    # verification must then fail because CN fallback is disabled.
    client_context, server_context, hostname = testing_context(NOSANFILE)
    client_context.hostname_checks_common_name = False
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(ssl.SSLCertVerificationError):
                s.connect((HOST, server.port))
def test_ecc_cert(self):
    """A server presenting an ECDSA certificate can be verified when
    the client restricts itself to ECDHE/ECDSA cipher suites."""
    hostname = SIGNED_CERTFILE_ECC_HOSTNAME
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)
    client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
    # Server side: load the ECC certificate chain.
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as sock:
            sock.connect((HOST, server.port))
            peer_cert = sock.getpeercert()
            self.assertTrue(peer_cert, "Can't get peer certificate.")
            cipher_parts = sock.cipher()[0].split('-')
            # NOTE(review): assertTrue with a second positional arg
            # treats it as the failure message, so this only checks the
            # prefix list is non-empty — kept as-is for compatibility.
            self.assertTrue(cipher_parts[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
    """A server loaded with both RSA and ECC key/cert pairs serves the
    ECC chain to a client that only accepts ECDSA suites."""
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)
    # TODO: fix TLSv1.3 once SSLContext can restrict signature
    # algorithms.
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # only ECDSA certs
    client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
    hostname = SIGNED_CERTFILE_ECC_HOSTNAME
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # load ECC and RSA key/cert pairs
    server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()[0].split('-')
            # NOTE(review): assertTrue with two args treats the second
            # as a message; likely assertEqual was intended — confirm.
            self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
    """Internationalized hostnames (IDNA 2003/2008, str and bytes
    forms) are normalized to their punycode form and verified."""
    if support.verbose:
        sys.stdout.write("\n")
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(IDNSANSFILE)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_verify_locations(SIGNING_CA)
    # correct hostname should verify, when specified in several
    # different ways
    idn_hostnames = [
        ('könig.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),
        ('xn--knig-5qa.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),
        (b'xn--knig-5qa.idn.pythontest.net',
         'xn--knig-5qa.idn.pythontest.net'),
        ('königsgäßchen.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
        ('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
        (b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
         'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
        # ('königsgäßchen.idna2008.pythontest.net',
        #  'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
        ('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
         'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
        (b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
         'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
    ]
    for server_hostname, expected_hostname in idn_hostnames:
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname=server_hostname) as s:
                # server_hostname is normalized before and after connect.
                self.assertEqual(s.server_hostname, expected_hostname)
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertEqual(s.server_hostname, expected_hostname)
                self.assertTrue(cert, "Can't get peer certificate.")
    # incorrect hostname should raise an exception
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="python.example.org") as s:
            with self.assertRaises(ssl.CertificateError):
                s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
    """Connecting when the server rejects the client's certificate
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with a wrong client certificate fails.
    """
    client_context, server_context, hostname = testing_context()
    # load client cert that is not signed by trusted CA
    client_context.load_cert_chain(CERTFILE)
    # require TLS client authentication
    server_context.verify_mode = ssl.CERT_REQUIRED
    # TLS 1.3 has different handshake
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    server = ThreadedEchoServer(
        context=server_context, chatty=True, connectionchatty=True,
    )
    with server, \
            client_context.wrap_socket(socket.socket(),
                                       server_hostname=hostname) as s:
        try:
            # Expect either an SSL error about the server rejecting
            # the connection, or a low-level connection reset (which
            # sometimes happens on Windows)
            s.connect((HOST, server.port))
        except ssl.SSLError as e:
            if support.verbose:
                sys.stdout.write("\nSSLError is %r\n" % e)
        except OSError as e:
            if e.errno != errno.ECONNRESET:
                raise
            if support.verbose:
                sys.stdout.write("\nsocket.error is %r\n" % e)
        else:
            self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
    """Under TLS 1.3 the client-certificate rejection surfaces only
    after the handshake, on the first read/write."""
    client_context, server_context, hostname = testing_context()
    # load client cert that is not signed by trusted CA
    client_context.load_cert_chain(CERTFILE)
    server_context.verify_mode = ssl.CERT_REQUIRED
    server_context.minimum_version = ssl.TLSVersion.TLSv1_3
    client_context.minimum_version = ssl.TLSVersion.TLSv1_3
    server = ThreadedEchoServer(
        context=server_context, chatty=True, connectionchatty=True,
    )
    with server, \
            client_context.wrap_socket(socket.socket(),
                                       server_hostname=hostname,
                                       suppress_ragged_eofs=False) as s:
        # TLS 1.3 perform client cert exchange after handshake
        s.connect((HOST, server.port))
        try:
            s.write(b'data')
            s.read(1000)
            s.write(b'should have failed already')
            s.read(1000)
        except ssl.SSLError as e:
            if support.verbose:
                sys.stdout.write("\nSSLError is %r\n" % e)
        except OSError as e:
            if e.errno != errno.ECONNRESET:
                raise
            if support.verbose:
                sys.stdout.write("\nsocket.error is %r\n" % e)
        else:
            self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
    """A brutal shutdown of an SSL server should raise an OSError
    in the client when attempting handshake.
    """
    listener_ready = threading.Event()
    listener_gone = threading.Event()
    s = socket.socket()
    port = socket_helper.bind_port(s, HOST)
    # `listener` runs in a thread. It sits in an accept() until
    # the main thread connects. Then it rudely closes the socket,
    # and sets Event `listener_gone` to let the main thread know
    # the socket is gone.
    def listener():
        s.listen()
        listener_ready.set()
        newsock, addr = s.accept()
        newsock.close()
        s.close()
        listener_gone.set()
    def connector():
        listener_ready.wait()
        with socket.socket() as c:
            c.connect((HOST, port))
            # Wait for the server side to vanish before handshaking.
            listener_gone.wait()
            try:
                ssl_sock = test_wrap_socket(c)
            except OSError:
                pass
            else:
                self.fail('connecting to closed SSL socket should have failed')
    t = threading.Thread(target=listener)
    t.start()
    try:
        connector()
    finally:
        t.join()
def test_ssl_cert_verify_error(self):
    """An untrusted server cert raises SSLCertVerificationError carrying
    OpenSSL's verify code/message (20: unable to get local issuer)."""
    if support.verbose:
        sys.stdout.write("\n")
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    # Default client context: signing CA deliberately NOT loaded.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
            try:
                s.connect((HOST, server.port))
            except ssl.SSLError as e:
                msg = 'unable to get local issuer certificate'
                self.assertIsInstance(e, ssl.SSLCertVerificationError)
                self.assertEqual(e.verify_code, 20)
                self.assertEqual(e.verify_message, msg)
                self.assertIn(msg, repr(e))
                self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
    """Connecting to an SSLv2 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
    # Cross-protocol combinations must fail to negotiate.
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
    # SSLv23 client with specific SSL options
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
    """Connecting to an SSLv23 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    if has_tls_version('SSLv2'):
        try:
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
        except OSError as x:
            # this fails on some older versions of OpenSSL (0.9.7l, for instance)
            if support.verbose:
                sys.stdout.write(
                    " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                    % str(x))
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
    # Same matrix again with CERT_OPTIONAL and CERT_REQUIRED.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    # Server with specific SSL options
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
    # Will choose TLSv1
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
                       server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
    # SSLv3 server cannot be reached when the client disables SSLv3.
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
    """Connecting to a TLSv1 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    # Legacy protocols and an OP_NO_TLSv1 client must fail to connect.
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
    """Connecting to a TLSv1.1 server with various client options.
    Testing against older TLS versions."""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1_1)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    # Fixed-version contexts do not negotiate across versions.
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
    """Connecting to a TLSv1.2 server with various client options.
    Testing against older TLS versions."""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
                       server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                       client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
    if has_tls_version('SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1_2)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
    # Fixed-version 1.2 contexts must not interoperate with 1.0/1.1.
    if has_tls_protocol(ssl.PROTOCOL_TLSv1):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
    if has_tls_protocol(ssl.PROTOCOL_TLSv1_1):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
    """Switching from clear text to encrypted and back again."""
    msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
    server = ThreadedEchoServer(CERTFILE,
                                starttls_server=True,
                                chatty=True,
                                connectionchatty=True)
    # `wrapped` tracks whether the connection is currently TLS-wrapped;
    # it selects which socket object (plain `s` or wrapped `conn`) to use.
    wrapped = False
    with server:
        s = socket.socket()
        s.setblocking(True)
        s.connect((HOST, server.port))
        if support.verbose:
            sys.stdout.write("\n")
        for indata in msgs:
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            if wrapped:
                conn.write(indata)
                outdata = conn.read()
            else:
                s.send(indata)
                outdata = s.recv(1024)
            msg = outdata.strip().lower()
            if indata == b"STARTTLS" and msg.startswith(b"ok"):
                # STARTTLS ok, switch to secure mode
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server, starting TLS...\n"
                        % msg)
                conn = test_wrap_socket(s)
                wrapped = True
            elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                # ENDTLS ok, switch back to clear text
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server, ending TLS...\n"
                        % msg)
                s = conn.unwrap()
                wrapped = False
            else:
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server\n" % msg)
        if support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        if wrapped:
            conn.write(b"over\n")
        else:
            s.send(b"over\n")
        if wrapped:
            conn.close()
        else:
            s.close()
def test_socketserver(self):
    """Using socketserver to create and manage SSL connections."""
    server = make_https_server(self, certfile=SIGNED_CERTFILE)
    # try to connect
    if support.verbose:
        sys.stdout.write('\n')
    # Read the cert file locally, then fetch the same file over HTTPS
    # and compare the two payloads byte-for-byte.
    with open(CERTFILE, 'rb') as f:
        d1 = f.read()
    d2 = ''
    # now fetch the same data from the HTTPS server
    url = 'https://localhost:%d/%s' % (
        server.port, os.path.split(CERTFILE)[1])
    context = ssl.create_default_context(cafile=SIGNING_CA)
    f = urllib.request.urlopen(url, context=context)
    try:
        dlen = f.info().get("content-length")
        if dlen and (int(dlen) > 0):
            d2 = f.read(int(dlen))
            if support.verbose:
                sys.stdout.write(
                    " client: read %d bytes from remote server '%s'\n"
                    % (len(d2), server))
    finally:
        f.close()
    self.assertEqual(d1, d2)
def test_asyncore_server(self):
    """Check the example asyncore integration."""
    if support.verbose:
        sys.stdout.write("\n")
    indata = b"FOO\n"
    server = AsyncoreEchoServer(CERTFILE)
    with server:
        s = test_wrap_socket(socket.socket())
        s.connect(('127.0.0.1', server.port))
        if support.verbose:
            sys.stdout.write(
                " client: sending %r...\n" % indata)
        s.write(indata)
        outdata = s.read()
        if support.verbose:
            sys.stdout.write(" client: read %r\n" % outdata)
        # The echo server lower-cases the payload before echoing it.
        if outdata != indata.lower():
            self.fail(
                "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                % (outdata[:20], len(outdata),
                   indata[:20].lower(), len(indata)))
        s.write(b"over\n")
        if support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        s.close()
        if support.verbose:
            sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
    """Test recv(), send() and friends.

    Fix: the failure-message templates used invalid format specs —
    ``{outdata:r}``/``{indata:r}`` (there is no ``r`` format code, so
    str.format raises ValueError) and ``{exp:s}`` applied to an
    exception object (object.__format__ rejects a non-empty spec with
    TypeError). Either would have crashed while *reporting* a failure,
    hiding the real problem. They are now the ``!r``/``!s`` conversion
    flags, which is what was clearly intended.
    """
    if support.verbose:
        sys.stdout.write("\n")
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = test_wrap_socket(socket.socket(),
                             server_side=False,
                             certfile=CERTFILE,
                             ca_certs=CERTFILE,
                             cert_reqs=ssl.CERT_NONE)
        s.connect((HOST, server.port))
        # helper methods for standardising recv* method signatures
        def _recv_into():
            b = bytearray(b"\0"*100)
            count = s.recv_into(b)
            return b[:count]
        def _recvfrom_into():
            b = bytearray(b"\0"*100)
            count, addr = s.recvfrom_into(b)
            return b[:count]
        # (name, method, expect success?, *args, return value func)
        send_methods = [
            ('send', s.send, True, [], len),
            ('sendto', s.sendto, False, ["some.address"], len),
            ('sendall', s.sendall, True, [], lambda x: None),
        ]
        # (name, method, whether to expect success, *args)
        recv_methods = [
            ('recv', s.recv, True, []),
            ('recvfrom', s.recvfrom, False, ["some.address"]),
            ('recv_into', _recv_into, True, []),
            ('recvfrom_into', _recvfrom_into, False, []),
        ]
        data_prefix = "PREFIX_"
        for (meth_name, send_meth, expect_success, args,
             ret_val_meth) in send_methods:
            indata = (data_prefix + meth_name).encode('ascii')
            try:
                ret = send_meth(indata, *args)
                msg = "sending with {}".format(meth_name)
                self.assertEqual(ret, ret_val_meth(indata), msg=msg)
                outdata = s.read()
                # Echo server lower-cases the payload.
                if outdata != indata.lower():
                    self.fail(
                        "While sending with <<{name:s}>> bad data "
                        "<<{outdata!r}>> ({nout:d}) received; "
                        "expected <<{indata!r}>> ({nin:d})\n".format(
                            name=meth_name, outdata=outdata[:20],
                            nout=len(outdata),
                            indata=indata[:20], nin=len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to send with method <<{name:s}>>; "
                        "expected to succeed.\n".format(name=meth_name)
                    )
                # Unsupported methods must mention their own name.
                if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
                            name=meth_name, exp=e
                        )
                    )
        for meth_name, recv_meth, expect_success, args in recv_methods:
            indata = (data_prefix + meth_name).encode('ascii')
            try:
                s.send(indata)
                outdata = recv_meth(*args)
                if outdata != indata.lower():
                    self.fail(
                        "While receiving with <<{name:s}>> bad data "
                        "<<{outdata!r}>> ({nout:d}) received; "
                        "expected <<{indata!r}>> ({nin:d})\n".format(
                            name=meth_name, outdata=outdata[:20],
                            nout=len(outdata),
                            indata=indata[:20], nin=len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to receive with method <<{name:s}>>; "
                        "expected to succeed.\n".format(name=meth_name)
                    )
                if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
                            name=meth_name, exp=e
                        )
                    )
                # consume data
                s.read()
        # read(-1, buffer) is supported, even though read(-1) is not
        data = b"data"
        s.send(data)
        buffer = bytearray(len(data))
        self.assertEqual(s.read(-1, buffer), len(data))
        self.assertEqual(buffer, data)
        # sendall accepts bytes-like objects
        if ctypes is not None:
            ubyte = ctypes.c_ubyte * len(data)
            byteslike = ubyte.from_buffer_copy(data)
            s.sendall(byteslike)
            self.assertEqual(s.read(), data)
        # Make sure sendmsg et al are disallowed to avoid
        # inadvertent disclosure of data and/or corruption
        # of the encrypted data stream
        self.assertRaises(NotImplementedError, s.dup)
        self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
        self.assertRaises(NotImplementedError, s.recvmsg, 100)
        self.assertRaises(NotImplementedError,
                          s.recvmsg_into, [bytearray(100)])
        s.write(b"over\n")
        self.assertRaises(ValueError, s.recv, -1)
        self.assertRaises(ValueError, s.read, -1)
        s.close()
def test_recv_zero(self):
    """recv(0)/read(0) return empty bytes and never block, even on a
    non-blocking socket with no pending data."""
    server = ThreadedEchoServer(CERTFILE)
    server.__enter__()
    self.addCleanup(server.__exit__, None, None)
    s = socket.create_connection((HOST, server.port))
    self.addCleanup(s.close)
    s = test_wrap_socket(s, suppress_ragged_eofs=False)
    self.addCleanup(s.close)
    # recv/read(0) should return no data
    s.send(b"data")
    self.assertEqual(s.recv(0), b"")
    self.assertEqual(s.read(0), b"")
    self.assertEqual(s.read(), b"data")
    # Should not block if the other end sends no data
    s.setblocking(False)
    self.assertEqual(s.recv(0), b"")
    self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
    """Sending on a non-blocking SSL socket eventually raises
    SSLWantWriteError/SSLWantReadError once the buffers fill up."""
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = test_wrap_socket(socket.socket(),
                             server_side=False,
                             certfile=CERTFILE,
                             ca_certs=CERTFILE,
                             cert_reqs=ssl.CERT_NONE)
        s.connect((HOST, server.port))
        s.setblocking(False)
        # If we keep sending data, at some point the buffers
        # will be full and the call will block
        buf = bytearray(8192)
        def fill_buffer():
            while True:
                s.send(buf)
        self.assertRaises((ssl.SSLWantWriteError,
                           ssl.SSLWantReadError), fill_buffer)
        # Now read all the output and discard it
        s.setblocking(True)
        s.close()
def test_handshake_timeout(self):
    # Issue #5103: SSL handshake must respect the socket timeout
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = socket_helper.bind_port(server)
    started = threading.Event()
    finish = False
    # Server thread accepts connections but never speaks TLS, so the
    # client handshake can only end via its socket timeout.
    def serve():
        server.listen()
        started.set()
        conns = []
        while not finish:
            r, w, e = select.select([server], [], [], 0.1)
            if server in r:
                # Let the socket hang around rather than having
                # it closed by garbage collection.
                conns.append(server.accept()[0])
        for sock in conns:
            sock.close()
    t = threading.Thread(target=serve)
    t.start()
    started.wait()
    try:
        try:
            c = socket.socket(socket.AF_INET)
            c.settimeout(0.2)
            c.connect((host, port))
            # Will attempt handshake and time out
            self.assertRaisesRegex(TimeoutError, "timed out",
                                   test_wrap_socket, c)
        finally:
            c.close()
        try:
            c = socket.socket(socket.AF_INET)
            c = test_wrap_socket(c)
            c.settimeout(0.2)
            # Will attempt handshake and time out
            self.assertRaisesRegex(TimeoutError, "timed out",
                                   c.connect, (host, port))
        finally:
            c.close()
    finally:
        finish = True
        t.join()
        server.close()
def test_server_accept(self):
    # Issue #16357: accept() on a SSLSocket created through
    # SSLContext.wrap_socket().
    client_ctx, server_ctx, hostname = testing_context()
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = socket_helper.bind_port(server)
    server = server_ctx.wrap_socket(server, server_side=True)
    self.assertTrue(server.server_side)
    evt = threading.Event()
    remote = None
    peer = None
    def serve():
        nonlocal remote, peer
        server.listen()
        # Block on the accept and wait on the connection to close.
        evt.set()
        remote, peer = server.accept()
        remote.send(remote.recv(4))
    t = threading.Thread(target=serve)
    t.start()
    # Client wait until server setup and perform a connect.
    evt.wait()
    client = client_ctx.wrap_socket(
        socket.socket(), server_hostname=hostname
    )
    client.connect((hostname, port))
    client.send(b'data')
    client.recv()
    client_addr = client.getsockname()
    client.close()
    t.join()
    remote.close()
    server.close()
    # Sanity checks.
    self.assertIsInstance(remote, ssl.SSLSocket)
    self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
    """getpeercert() on a never-connected SSLSocket raises ENOTCONN."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    with context.wrap_socket(socket.socket()) as sock:
        with self.assertRaises(OSError) as caught:
            sock.getpeercert()
        self.assertEqual(caught.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
    """do_handshake() on a never-connected SSLSocket raises ENOTCONN."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    with context.wrap_socket(socket.socket()) as sock:
        with self.assertRaises(OSError) as caught:
            sock.do_handshake()
        self.assertEqual(caught.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
    """Disjoint cipher suites on client and server abort the handshake
    and the server records a 'no shared cipher' error."""
    client_context, server_context, hostname = testing_context()
    # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # Force different suites on client and server
    client_context.set_ciphers("AES128")
    server_context.set_ciphers("AES256")
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(OSError):
                s.connect((HOST, server.port))
    self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
    """
    Basic tests for SSLSocket.version().
    More tests are done in the test_protocol_*() methods.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    with ThreadedEchoServer(CERTFILE,
                            ssl_version=ssl.PROTOCOL_TLS_SERVER,
                            chatty=False) as server:
        with context.wrap_socket(socket.socket()) as s:
            # Before connecting there is no SSL object and no version.
            self.assertIs(s.version(), None)
            self.assertIs(s._sslobj, None)
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.3')
        # After close the SSL object is released and version() resets.
        self.assertIs(s._sslobj, None)
        self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
    """A TLS 1.3 connection negotiates one of the TLS 1.3 AEAD suites."""
    client_context, server_context, hostname = testing_context()
    client_context.minimum_version = ssl.TLSVersion.TLSv1_3
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertIn(s.cipher()[0], {
                'TLS_AES_256_GCM_SHA384',
                'TLS_CHACHA20_POLY1305_SHA256',
                'TLS_AES_128_GCM_SHA256',
            })
            self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
    """Overlapping min/max ranges settle on the server's only version."""
    client_context, server_context, hostname = testing_context()
    # client TLSv1.0 to 1.2
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # server only TLSv1.2
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.2')
    @requires_tls_version('TLSv1_1')
    @ignore_deprecation
    def test_min_max_version_tlsv1_1(self):
        """Overlapping version ranges (client 1.0-1.2, server 1.0-1.1) must
        settle on the highest common version, TLS 1.1."""
        client_context, server_context, hostname = testing_context()
        # client 1.0 to 1.2, server 1.0 to 1.1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1
        server_context.maximum_version = ssl.TLSVersion.TLSv1_1
        # Legacy protocols need a lowered OpenSSL security level.
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.1')
    @requires_tls_version('TLSv1_2')
    @requires_tls_version('TLSv1')
    @ignore_deprecation
    def test_min_max_version_mismatch(self):
        """Disjoint version ranges (client pinned to 1.0, server pinned to
        1.2) must fail the handshake with a TLS alert."""
        client_context, server_context, hostname = testing_context()
        # client 1.0, server 1.2 (mismatch)
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        client_context.maximum_version = ssl.TLSVersion.TLSv1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                with self.assertRaises(ssl.SSLError) as e:
                    s.connect((HOST, server.port))
                self.assertIn("alert", str(e.exception))
    @requires_tls_version('SSLv3')
    def test_min_max_version_sslv3(self):
        """Pinning both sides to SSLv3 must yield an SSLv3 connection (only
        run when the OpenSSL build still supports SSLv3)."""
        client_context, server_context, hostname = testing_context()
        server_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.maximum_version = ssl.TLSVersion.SSLv3
        # SSLv3 requires a lowered OpenSSL security level.
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'SSLv3')
    def test_default_ecdh_curve(self):
        """A default context must negotiate an ECDH key exchange without any
        explicit curve configuration."""
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        client_context, server_context, hostname = testing_context()
        # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
        # cipher name.
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias. Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding."""
        if support.verbose:
            sys.stdout.write("\n")
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # get the data
                cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        " got channel binding data: {0!r}\n".format(cb_data))
                # check if it is sane
                self.assertIsNotNone(cb_data)
                if s.version() == 'TLSv1.3':
                    # TLS 1.3 exposes a 48-byte binding value here.
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12)  # True for TLSv1
                # and compare with the peers version
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(cb_data).encode("us-ascii"))
            # now, again
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                new_cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        "got another channel binding data: {0!r}\n".format(
                            new_cb_data)
                    )
                # is it really unique
                self.assertNotEqual(cb_data, new_cb_data)
                self.assertIsNotNone(cb_data)
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12)  # True for TLSv1
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(new_cb_data).encode("us-ascii"))
    def test_compression(self):
        """A default handshake must report one of the known TLS compression
        states (usually None, since compression is disabled by default)."""
        client_context, server_context, hostname = testing_context()
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        if support.verbose:
            sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
        self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        """Setting OP_NO_COMPRESSION on both sides must yield an
        uncompressed connection."""
        client_context, server_context, hostname = testing_context()
        client_context.options |= ssl.OP_NO_COMPRESSION
        server_context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
    def test_ecdh_curve(self):
        """Explicit ECDH curve selection: matching or auto-negotiated curves
        connect; mismatched pinned curves must fail the handshake."""
        # server secp384r1, client auto
        client_context, server_context, hostname = testing_context()
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server auto, client secp384r1
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server / client curve mismatch
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("prime256v1")
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        with self.assertRaises(ssl.SSLError):
            server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
    def test_selected_alpn_protocol(self):
        """Without any ALPN configuration, no protocol is selected."""
        # selected_alpn_protocol() is None unless ALPN is used.
        client_context, server_context, hostname = testing_context()
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        self.assertIs(stats['client_alpn_protocol'], None)
    def test_selected_alpn_protocol_if_server_uses_alpn(self):
        """Server-only ALPN configuration selects nothing; the client must
        offer protocols too."""
        # selected_alpn_protocol() is None unless ALPN is used by the client.
        client_context, server_context, hostname = testing_context()
        server_context.set_alpn_protocols(['foo', 'bar'])
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        self.assertIs(stats['client_alpn_protocol'], None)
    def test_alpn_protocols(self):
        """ALPN negotiation: the server's preference order wins when there is
        an overlap; no overlap selects nothing on both sides."""
        server_protocols = ['foo', 'bar', 'milkshake']
        protocol_tests = [
            (['foo', 'bar'], 'foo'),
            (['bar', 'foo'], 'foo'),
            (['milkshake'], 'milkshake'),
            (['http/3.0', 'http/4.0'], None)
        ]
        for client_protocols, expected in protocol_tests:
            client_context, server_context, hostname = testing_context()
            server_context.set_alpn_protocols(server_protocols)
            client_context.set_alpn_protocols(client_protocols)
            try:
                stats = server_params_test(client_context,
                                           server_context,
                                           chatty=True,
                                           connectionchatty=True,
                                           sni_name=hostname)
            except ssl.SSLError as e:
                # Some OpenSSL builds abort the handshake on no overlap;
                # treat that the same as "nothing selected" below.
                stats = e
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                    str(expected))
            client_result = stats['client_alpn_protocol']
            self.assertEqual(client_result, expected,
                             msg % (client_result, "client"))
            server_result = stats['server_alpn_protocols'][-1] \
                if len(stats['server_alpn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected,
                             msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
    def sni_contexts(self):
        """Return (server_context, other_context, client_context) for the SNI
        tests: two server contexts with different certs, and a client that
        trusts the signing CA for both."""
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        other_context.load_cert_chain(SIGNED_CERTFILE2)
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        client_context.load_verify_locations(SIGNING_CA)
        return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
    def test_sni_callback(self):
        """The servername callback must receive the client's SNI name, may
        swap the connection's context, and can be disabled again."""
        calls = []
        server_context, other_context, client_context = self.sni_contexts()
        client_context.check_hostname = False
        def servername_cb(ssl_sock, server_name, initial_context):
            calls.append((server_name, initial_context))
            if server_name is not None:
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # CERTFILE4 was selected
        self.check_common_name(stats, 'fakehostname')
        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        self.assertEqual(calls, [])
    def test_sni_callback_alert(self):
        """A TLS alert returned from the SNI callback must be reflected to
        the connecting client as an SSLError."""
        # Returning a TLS alert is reflected to the connecting client
        server_context, other_context, client_context = self.sni_contexts()
        def cb_returning_alert(ssl_sock, server_name, initial_context):
            return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
        server_context.set_servername_callback(cb_returning_alert)
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
    def test_sni_callback_raising(self):
        """An exception raised in the SNI callback fails the handshake with
        a handshake-failure alert and surfaces as an unraisable exception."""
        # Raising fails the connection with a TLS handshake failure alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_raising(ssl_sock, server_name, initial_context):
            1/0
        server_context.set_servername_callback(cb_raising)
        with support.catch_unraisable_exception() as catch:
            with self.assertRaises(ssl.SSLError) as cm:
                stats = server_params_test(client_context, server_context,
                                           chatty=False,
                                           sni_name='supermessage')
            # The original ZeroDivisionError cannot propagate through the C
            # callback, so it is reported via the unraisable hook instead.
            self.assertEqual(cm.exception.reason,
                             'SSLV3_ALERT_HANDSHAKE_FAILURE')
            self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
    def test_sni_callback_wrong_return_type(self):
        """A non-int return from the SNI callback terminates the connection
        with an internal-error alert and raises TypeError unraisably."""
        # Returning the wrong return type terminates the TLS connection
        # with an internal error alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_wrong_return_type(ssl_sock, server_name, initial_context):
            return "foo"
        server_context.set_servername_callback(cb_wrong_return_type)
        with support.catch_unraisable_exception() as catch:
            with self.assertRaises(ssl.SSLError) as cm:
                stats = server_params_test(client_context, server_context,
                                           chatty=False,
                                           sni_name='supermessage')
            self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
            self.assertEqual(catch.unraisable.exc_type, TypeError)
    def test_shared_ciphers(self):
        """shared_ciphers() must report only suites both sides offered
        (the server's AES256 restriction, plus always-on TLS 1.3 suites)."""
        client_context, server_context, hostname = testing_context()
        client_context.set_ciphers("AES128:AES256")
        server_context.set_ciphers("AES256:eNULL")
        expected_algs = [
            "AES256", "AES-256",
            # TLS 1.3 ciphers are always enabled
            "TLS_CHACHA20", "TLS_AES",
        ]
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        ciphers = stats['server_shared_ciphers'][0]
        self.assertGreater(len(ciphers), 0)
        for name, tls_version, bits in ciphers:
            if not any(alg in name for alg in expected_algs):
                self.fail(name)
    def test_read_write_after_close_raises_valuerror(self):
        """read() and write() on a closed SSLSocket must raise ValueError."""
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            s = client_context.wrap_socket(socket.socket(),
                                           server_hostname=hostname)
            s.connect((HOST, server.port))
            s.close()
            self.assertRaises(ValueError, s.read, 1024)
            self.assertRaises(ValueError, s.write, b'hello')
    def test_sendfile(self):
        """socket.sendfile() must work over a TLS socket (falling back to
        send() internally) and deliver the file contents intact."""
        TEST_DATA = b"x" * 512
        with open(os_helper.TESTFN, 'wb') as f:
            f.write(TEST_DATA)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                with open(os_helper.TESTFN, 'rb') as file:
                    s.sendfile(file)
                    # The echo server sends the data back verbatim.
                    self.assertEqual(s.recv(1024), TEST_DATA)
    def test_session(self):
        """TLS session resumption: sessions carry sane metadata, reuse is
        reported by both sides, and fresh handshakes create new sessions."""
        client_context, server_context, hostname = testing_context()
        # TODO: sessions aren't compatible with TLSv1.3 yet
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # first connection without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        session = stats['session']
        self.assertTrue(session.id)
        self.assertGreater(session.time, 0)
        self.assertGreater(session.timeout, 0)
        self.assertTrue(session.has_ticket)
        self.assertGreater(session.ticket_lifetime_hint, 0)
        self.assertFalse(stats['session_reused'])
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 1)
        self.assertEqual(sess_stat['hits'], 0)
        # reuse session
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 2)
        self.assertEqual(sess_stat['hits'], 1)
        self.assertTrue(stats['session_reused'])
        session2 = stats['session']
        # Resumption yields an equal but distinct SSLSession object.
        self.assertEqual(session2.id, session.id)
        self.assertEqual(session2, session)
        self.assertIsNot(session2, session)
        self.assertGreaterEqual(session2.time, session.time)
        self.assertGreaterEqual(session2.timeout, session.timeout)
        # another one without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        self.assertFalse(stats['session_reused'])
        session3 = stats['session']
        self.assertNotEqual(session3.id, session.id)
        self.assertNotEqual(session3, session)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 3)
        self.assertEqual(sess_stat['hits'], 1)
        # reuse session again
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        self.assertTrue(stats['session_reused'])
        session4 = stats['session']
        self.assertEqual(session4.id, session.id)
        self.assertEqual(session4, session)
        self.assertGreaterEqual(session4.time, session.time)
        self.assertGreaterEqual(session4.timeout, session.timeout)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 4)
        self.assertEqual(sess_stat['hits'], 2)
    def test_session_handling(self):
        """Error cases of the SSLSocket.session setter: wrong type, setting
        after the handshake, and reuse with a different SSLContext."""
        client_context, server_context, hostname = testing_context()
        client_context2, _, _ = testing_context()
        # TODO: session reuse does not work with TLSv1.3
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # session is None before handshake
                self.assertEqual(s.session, None)
                self.assertEqual(s.session_reused, None)
                s.connect((HOST, server.port))
                session = s.session
                self.assertTrue(session)
                with self.assertRaises(TypeError) as e:
                    s.session = object
                self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # cannot set session after handshake
                with self.assertRaises(ValueError) as e:
                    s.session = session
                self.assertEqual(str(e.exception),
                                 'Cannot set session after handshake.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # can set session before handshake and before the
                # connection was established
                s.session = session
                s.connect((HOST, server.port))
                self.assertEqual(s.session.id, session.id)
                self.assertEqual(s.session, session)
                self.assertEqual(s.session_reused, True)
            with client_context2.wrap_socket(socket.socket(),
                                             server_hostname=hostname) as s:
                # cannot re-use session with a different SSLContext
                with self.assertRaises(ValueError) as e:
                    s.session = session
                    s.connect((HOST, server.port))
                self.assertEqual(str(e.exception),
                                 'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
| ThreadedTests |
python | matplotlib__matplotlib | lib/matplotlib/rcsetup.py | {
"start": 1210,
"end": 24686
} | class ____:
    def __init__(self, key, valid, ignorecase=False, *,
                 _deprecated_since=None):
        """*valid* is a list of legal strings."""
        self.key = key
        self.ignorecase = ignorecase
        self._deprecated_since = _deprecated_since
        def func(s):
            # Normalization applied to the stored keys (and, in __call__,
            # to the looked-up value): lower-case when case-insensitive.
            if ignorecase:
                return s.lower()
            else:
                return s
        # Map from normalized spelling to the canonical spelling to return.
        self.valid = {func(k): k for k in valid}
    def __call__(self, s):
        """Return the canonical spelling of *s* if legal, else raise
        ValueError listing the supported values."""
        if self._deprecated_since:
            # Recover the module-level name this instance is bound to so the
            # deprecation warning can refer to the public validator name.
            name, = (k for k, v in globals().items() if v is self)
            _api.warn_deprecated(
                self._deprecated_since, name=name, obj_type="function")
        if self.ignorecase and isinstance(s, str):
            s = s.lower()
        if s in self.valid:
            return self.valid[s]
        msg = (f"{s!r} is not a valid value for {self.key}; supported values "
               f"are {[*self.valid.values()]}")
        if (isinstance(s, str)
                and (s.startswith('"') and s.endswith('"')
                     or s.startswith("'") and s.endswith("'"))
                and s[1:-1] in self.valid):
            # Common matplotlibrc mistake: quoting the value; hint at it.
            msg += "; remove quotes surrounding your string"
        raise ValueError(msg)
def _single_string_color_list(s, scalar_validator):
    """
    Convert the string *s* to a list of colors interpreting it either as a
    color sequence name, or a string containing single-letter colors.
    """
    # Registered color sequence names (e.g. 'tab10') take precedence.
    try:
        colors = mpl.color_sequences[s]
    except KeyError:
        try:
            # Sometimes, a list of colors might be a single string
            # of single-letter colornames. So give that a shot.
            colors = [scalar_validator(v.strip()) for v in s if v.strip()]
        except ValueError:
            raise ValueError(f'{s!r} is neither a color sequence name nor can '
                             'it be interpreted as a list of colors')
    return colors
@lru_cache
def _listify_validator(scalar_validator, allow_stringlist=False, *,
                       n=None, doc=None):
    """
    Build a validator applying *scalar_validator* to each element of a
    comma-separated string or of any ordered (non-set) iterable.

    If *n* is given the validated list must have exactly *n* elements;
    *doc* overrides the generated validator's docstring.  The result is
    cached so repeated requests share one validator object.
    """
    def f(s):
        if isinstance(s, str):
            try:
                val = [scalar_validator(v.strip()) for v in s.split(',')
                       if v.strip()]
            except Exception:
                if allow_stringlist:
                    # Special handling for colors
                    val = _single_string_color_list(s, scalar_validator)
                else:
                    raise
        # Allow any ordered sequence type -- generators, np.ndarray, pd.Series
        # -- but not sets, whose iteration order is non-deterministic.
        elif np.iterable(s) and not isinstance(s, (set, frozenset)):
            # The condition on this list comprehension will preserve the
            # behavior of filtering out any empty strings (behavior was
            # from the original validate_stringlist()), while allowing
            # any non-string/text scalar values such as numbers and arrays.
            val = [scalar_validator(v) for v in s
                   if not isinstance(v, str) or v]
        else:
            raise ValueError(
                f"Expected str or other non-set iterable, but got {s}")
        if n is not None and len(val) != n:
            raise ValueError(
                f"Expected {n} values, but there are {len(val)} values in {s}")
        return val
    try:
        f.__name__ = f"{scalar_validator.__name__}list"
    except AttributeError:  # class instance.
        f.__name__ = f"{type(scalar_validator).__name__}List"
    f.__qualname__ = f.__qualname__.rsplit(".", 1)[0] + "." + f.__name__
    f.__doc__ = doc if doc is not None else scalar_validator.__doc__
    return f
def validate_any(s):
    """Identity validator: accept any value unchanged."""
    return s
validate_anylist = _listify_validator(validate_any)
def _validate_date(s):
try:
np.datetime64(s)
return s
except ValueError:
raise ValueError(
f'{s!r} should be a string that can be parsed by numpy.datetime64')
def validate_bool(b):
    """Convert b to ``bool`` or raise."""
    if isinstance(b, str):
        b = b.lower()
    truthy = ('t', 'y', 'yes', 'on', 'true', '1', 1, True)
    falsy = ('f', 'n', 'no', 'off', 'false', '0', 0, False)
    if b in truthy:
        return True
    if b in falsy:
        return False
    raise ValueError(f'Cannot convert {b!r} to bool')
def validate_axisbelow(s):
    """Validate axes.axisbelow: a bool, or the literal string 'line'."""
    try:
        return validate_bool(s)
    except ValueError:
        if isinstance(s, str) and s == 'line':
            return 'line'
        raise ValueError(f'{s!r} cannot be interpreted as'
                         ' True, False, or "line"')
def validate_dpi(s):
    """Confirm s is string 'figure' or convert s to float or raise."""
    if s == 'figure':
        return s
    try:
        value = float(s)
    except ValueError as exc:
        raise ValueError(f'{s!r} is not string "figure" and '
                         f'could not convert {s!r} to float') from exc
    return value
def _make_type_validator(cls, *, allow_none=False):
    """
    Return a validator that converts inputs to *cls* or raises (and possibly
    allows ``None`` as well).
    """
    def validator(s):
        if (allow_none and
                (s is None or cbook._str_lower_equal(s, "none"))):
            # Only the exact spelling 'None' stays supported long-term;
            # other capitalizations of "none" warn before becoming errors.
            if cbook._str_lower_equal(s, "none") and s != "None":
                _api.warn_deprecated(
                    "3.11",
                    message=f"Using the capitalization {s!r} in matplotlibrc for "
                            "*None* is deprecated in %(removal)s and will lead to an "
                            "error from version 3.13 onward. Please use 'None' "
                            "instead."
                )
            return None
        if cls is str and not isinstance(s, str):
            # str(s) would "succeed" on anything; reject non-strings instead.
            raise ValueError(f'Could not convert {s!r} to str')
        try:
            return cls(s)
        except (TypeError, ValueError) as e:
            raise ValueError(
                f'Could not convert {s!r} to {cls.__name__}') from e
    # Give the closure an informative name for error messages and docs.
    validator.__name__ = f"validate_{cls.__name__}"
    if allow_none:
        validator.__name__ += "_or_None"
    validator.__qualname__ = (
        validator.__qualname__.rsplit(".", 1)[0] + "." + validator.__name__)
    return validator
# Scalar and list validators for the basic types, built from the factories
# above.
validate_string = _make_type_validator(str)
validate_string_or_None = _make_type_validator(str, allow_none=True)
validate_stringlist = _listify_validator(
    validate_string, doc='return a list of strings')
validate_int = _make_type_validator(int)
validate_int_or_None = _make_type_validator(int, allow_none=True)
validate_float = _make_type_validator(float)
validate_float_or_None = _make_type_validator(float, allow_none=True)
validate_floatlist = _listify_validator(
    validate_float, doc='return a list of floats')
def _validate_marker(s):
    """Validate a marker: either an int marker code or a marker string."""
    try:
        return validate_int(s)
    except ValueError:
        # The previously unused (shadowed) `as e` binding on this clause was
        # dropped; only the string-validation failure is chained below.
        try:
            return validate_string(s)
        except ValueError as e:
            raise ValueError('Supported markers are [string, int]') from e
# List form of _validate_marker.
_validate_markerlist = _listify_validator(
    _validate_marker, doc='return a list of markers')
def _validate_pathlike(s):
if isinstance(s, (str, os.PathLike)):
# Store value as str because savefig.directory needs to distinguish
# between "" (cwd) and "." (cwd, but gets updated by user selections).
return os.fsdecode(s)
else:
return validate_string(s)
def validate_fonttype(s):
    """
    Confirm that this is a Postscript or PDF font type that we know how to
    convert to.
    """
    fonttypes = {'type3': 3, 'truetype': 42}
    try:
        fonttype = validate_int(s)
    except ValueError:
        # Not an int: accept the symbolic names, case-insensitively.
        try:
            return fonttypes[s.lower()]
        except KeyError as e:
            raise ValueError('Supported Postscript/PDF font types are %s'
                             % list(fonttypes)) from e
    if fonttype not in fonttypes.values():
        raise ValueError(
            'Supported Postscript/PDF font types are %s' %
            list(fonttypes.values()))
    return fonttype
_auto_backend_sentinel = object()
def validate_backend(s):
    """Validate a backend name, also accepting the auto-detection sentinel."""
    if s is _auto_backend_sentinel or backend_registry.is_valid_backend(s):
        return s
    else:
        msg = (f"'{s}' is not a valid value for backend; supported values are "
               f"{backend_registry.list_all()}")
        raise ValueError(msg)
def _validate_toolbar(s):
    """Validate the toolbar rcParam: 'None', 'toolbar2', or 'toolmanager'."""
    s = ValidateInStrings(
        'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)(s)
    if s == 'toolmanager':
        _api.warn_external(
            "Treat the new Tool classes introduced in v1.5 as experimental "
            "for now; the API and rcParam may change in future versions.")
    return s
def validate_color_or_inherit(s):
    """Return a valid color arg, additionally accepting 'inherit'."""
    return s if cbook._str_equal(s, 'inherit') else validate_color(s)
def validate_color_or_auto(s):
    """Return a valid color arg, additionally accepting 'auto'."""
    return s if cbook._str_equal(s, 'auto') else validate_color(s)
def _validate_color_or_edge(s):
    """Return a valid color arg, additionally accepting 'edge'."""
    return s if cbook._str_equal(s, 'edge') else validate_color(s)
def validate_color_for_prop_cycle(s):
    """Validate a color for the prop cycle; "CN" cycle references are
    disallowed because the cycle cannot refer to itself."""
    is_cycle_ref = isinstance(s, str) and re.match("^C[0-9]$", s) is not None
    if is_cycle_ref:
        raise ValueError(f"Cannot put cycle reference ({s!r}) in prop_cycler")
    return validate_color(s)
def _validate_color_or_linecolor(s):
    """
    Validate a color, additionally accepting the special values 'linecolor',
    'mfc'/'markerfacecolor', 'mec'/'markeredgecolor', and None.
    """
    if cbook._str_equal(s, 'linecolor'):
        return s
    elif cbook._str_equal(s, 'mfc') or cbook._str_equal(s, 'markerfacecolor'):
        return 'markerfacecolor'
    elif cbook._str_equal(s, 'mec') or cbook._str_equal(s, 'markeredgecolor'):
        return 'markeredgecolor'
    elif s is None:
        return None
    # Bug fix: the condition used to read
    #   isinstance(s, str) and len(s) == 6 or len(s) == 8
    # which, by precedence, evaluated len(s) on non-str inputs and raised
    # TypeError (from len() or from '#' + s) instead of the ValueError below.
    elif isinstance(s, str) and len(s) in (6, 8):
        # Possibly a bare hex string without the leading '#'.
        stmp = '#' + s
        if is_color_like(stmp):
            return stmp
        if s.lower() == 'none':
            return None
    elif is_color_like(s):
        return s
    raise ValueError(f'{s!r} does not look like a color arg')
def validate_color(s):
    """Return a valid color arg."""
    if isinstance(s, str):
        if s.lower() == 'none':
            return 'none'
        # Possibly a bare hex string without the leading '#' (RGB or RGBA).
        if len(s) == 6 or len(s) == 8:
            stmp = '#' + s
            if is_color_like(stmp):
                return stmp
    if is_color_like(s):
        return s
    # If it is still valid, it must be a tuple (as a string from matplotlibrc).
    try:
        color = ast.literal_eval(s)
    except (SyntaxError, ValueError):
        pass
    else:
        if is_color_like(color):
            return color
    raise ValueError(f'{s!r} does not look like a color arg')
def _validate_color_or_None(s):
    """Validate a color, additionally accepting None / 'None'."""
    if s is not None and not cbook._str_equal(s, "None"):
        return validate_color(s)
    return None
# List-of-colors validator; allow_stringlist also accepts e.g. "rgb" as
# three single-letter color names.
validate_colorlist = _listify_validator(
    validate_color, allow_stringlist=True, doc='return a list of colorspecs')
def _validate_cmap(s):
    """Validate a colormap: a registered name (str) or a Colormap instance."""
    _api.check_isinstance((str, Colormap), cmap=s)
    return s
def validate_aspect(s):
    """Validate an aspect specification: 'auto', 'equal', or a float."""
    if s == 'auto' or s == 'equal':
        return s
    try:
        ratio = float(s)
    except ValueError as err:
        raise ValueError('not a valid aspect specification') from err
    return ratio
def validate_fontsize_None(s):
    """Like validate_fontsize, but additionally accept None / 'None'."""
    return None if s is None or s == 'None' else validate_fontsize(s)
def validate_fontsize(s):
    """Validate a font size: one of the named sizes, or a float (points)."""
    fontsizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
                 'x-large', 'xx-large', 'smaller', 'larger']
    if isinstance(s, str):
        s = s.lower()
    if s in fontsizes:
        return s
    try:
        size = float(s)
    except ValueError as e:
        raise ValueError("%s is not a valid font size. Valid font sizes "
                         "are %s." % (s, ", ".join(fontsizes))) from e
    return size
validate_fontsizelist = _listify_validator(validate_fontsize)
def validate_fontweight(s):
    """Validate a font weight: a known weight name, or an int."""
    weights = [
        'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
        'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black']
    # Note: Historically, weights have been case-sensitive in Matplotlib
    if s in weights:
        return s
    try:
        weight = int(s)
    except (ValueError, TypeError) as err:
        raise ValueError(f'{s} is not a valid font weight.') from err
    return weight
def validate_fontstretch(s):
    """Validate a font stretch: a known stretch name, or an int."""
    stretchvalues = [
        'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed',
        'normal', 'semi-expanded', 'expanded', 'extra-expanded',
        'ultra-expanded']
    # Note: Historically, stretchvalues have been case-sensitive in Matplotlib
    if s in stretchvalues:
        return s
    try:
        stretch = int(s)
    except (ValueError, TypeError) as err:
        raise ValueError(f'{s} is not a valid font stretch.') from err
    return stretch
def validate_font_properties(s):
    """Validate that *s* parses as a fontconfig pattern; return it unchanged
    (parse_fontconfig_pattern raises on invalid input)."""
    parse_fontconfig_pattern(s)
    return s
def _validate_mathtext_fallback(s):
_fallback_fonts = ['cm', 'stix', 'stixsans']
if isinstance(s, str):
s = s.lower()
if s is None or s == 'none':
return None
elif s.lower() in _fallback_fonts:
return s
else:
raise ValueError(
f"{s} is not a valid fallback font name. Valid fallback font "
f"names are {','.join(_fallback_fonts)}. Passing 'None' will turn "
"fallback off.")
def validate_whiskers(s):
    """Validate boxplot whiskers: a (float, float) pair or a single float."""
    try:
        return _listify_validator(validate_float, n=2)(s)
    except (TypeError, ValueError):
        try:
            return float(s)
        except ValueError as e:
            raise ValueError("Not a valid whisker value [float, "
                             "(float, float)]") from e
def validate_ps_distiller(s):
    """Validate ps.usedistiller: None (various spellings), 'ghostscript',
    or 'xpdf'."""
    if isinstance(s, str):
        s = s.lower()
    if s in ('none', None, 'false', False):
        return None
    else:
        return ValidateInStrings('ps.usedistiller', ['ghostscript', 'xpdf'])(s)
# A validator dedicated to the named line styles, based on the items in
# ls_mapper, and a list of possible strings read from Line2D.set_linestyle
# ('None'/'none'/' '/'' all mean "no line").
_validate_named_linestyle = ValidateInStrings(
    'linestyle',
    [*ls_mapper.keys(), *ls_mapper.values(), 'None', 'none', ' ', ''],
    ignorecase=True)
def _validate_linestyle(ls):
    """
    A validator for all possible line styles, the named ones *and*
    the on-off ink sequences.
    """
    if isinstance(ls, str):
        try:  # Look first for a valid named line style, like '--' or 'solid'.
            return _validate_named_linestyle(ls)
        except ValueError:
            pass
        try:
            ls = ast.literal_eval(ls)  # Parsing matplotlibrc.
        except (SyntaxError, ValueError):
            pass  # Will error with the ValueError at the end.
    def _is_iterable_not_string_like(x):
        # Explicitly exclude bytes/bytearrays so that they are not
        # nonsensically interpreted as sequences of numbers (codepoints).
        return np.iterable(x) and not isinstance(x, (str, bytes, bytearray))
    if _is_iterable_not_string_like(ls):
        if len(ls) == 2 and _is_iterable_not_string_like(ls[1]):
            # (offset, (on, off, on, off, ...))
            offset, onoff = ls
        else:
            # For backcompat: (on, off, on, off, ...); the offset is implicit.
            offset = 0
            onoff = ls
        # A valid dash spec: real offset plus an even-length run of reals.
        if (isinstance(offset, Real)
                and len(onoff) % 2 == 0
                and all(isinstance(elem, Real) for elem in onoff)):
            return (offset, onoff)
    raise ValueError(f"linestyle {ls!r} is not a valid on-off ink sequence.")
def _validate_linestyle_or_None(s):
    """Validate a linestyle, additionally accepting None / 'None'."""
    if s is not None and not cbook._str_equal(s, "None"):
        return _validate_linestyle(s)
    return None
# Marker fill styles, and the list form thereof.
validate_fillstyle = ValidateInStrings(
    'markers.fillstyle', ['full', 'left', 'right', 'bottom', 'top', 'none'])
validate_fillstylelist = _listify_validator(validate_fillstyle)
def validate_markevery(s):
    """
    Validate the markevery property of a Line2D object.

    Parameters
    ----------
    s : None, int, (int, int), slice, float, (float, float), or list[int]

    Returns
    -------
    None, int, (int, int), slice, float, (float, float), or list[int]
    """
    # slice, float, int (including bool) and None pass through unchanged.
    if isinstance(s, (slice, float, int, type(None))):
        return s
    if isinstance(s, tuple):
        homogeneous_pair = (
            len(s) == 2
            and (all(isinstance(e, int) for e in s)
                 or all(isinstance(e, float) for e in s)))
        if not homogeneous_pair:
            raise TypeError(
                "'markevery' tuple must be pair of ints or of floats")
        return s
    if isinstance(s, list):
        if not all(isinstance(e, int) for e in s):
            raise TypeError(
                "'markevery' list must have all elements of type int")
        return s
    raise TypeError("'markevery' is of an invalid type")
validate_markeverylist = _listify_validator(validate_markevery)
def validate_bbox(s):
    """Validate savefig.bbox: 'tight', or 'standard'/None (stored as None)."""
    if isinstance(s, str):
        s = s.lower()
        if s == 'tight':
            return s
        if s == 'standard':
            return None
        raise ValueError("bbox should be 'tight' or 'standard'")
    if s is not None:
        # Backwards compatibility. None is equivalent to 'standard'.
        raise ValueError("bbox should be 'tight' or 'standard'")
    return s
def validate_sketch(s):
    """Validate a sketch spec: None/'none', or a (scale, length, randomness) triple."""
    if isinstance(s, str):
        s = s.lower().strip()
        # Accept a parenthesized string form, e.g. "(1, 100, 2)".
        if s[:1] == "(" and s[-1:] == ")":
            s = s[1:-1]
    if s is None or s == 'none':
        return None
    try:
        scale_length_randomness = _listify_validator(validate_float, n=3)(s)
    except ValueError as exc:
        raise ValueError("Expected a (scale, length, randomness) tuple") from exc
    return tuple(scale_length_randomness)
def _validate_greaterthan_minushalf(s):
    """Validate a float strictly greater than -0.5."""
    value = validate_float(s)
    # `not value > -0.5` (rather than `value <= -0.5`) so that NaN also raises.
    if not value > -0.5:
        raise RuntimeError(f'Value must be >-0.5; got {value}')
    return value
def _validate_greaterequal0_lessequal1(s):
    """Validate a float within the closed interval [0, 1]."""
    value = validate_float(s)
    # Negated chained comparison so that NaN also raises.
    if not 0 <= value <= 1:
        raise RuntimeError(f'Value must be >=0 and <=1; got {value}')
    return value
def _validate_int_greaterequal0(s):
    """Validate a non-negative integer."""
    value = validate_int(s)
    if value < 0:
        raise RuntimeError(f'Value must be >=0; got {value}')
    return value
def validate_hatch(s):
    r"""
    Validate a hatch pattern.

    A hatch pattern string can have any sequence of the following
    characters: ``\ / | - + * . x o O``.

    Parameters
    ----------
    s : str
        The hatch pattern to validate.

    Returns
    -------
    str
        *s*, unchanged, if it is a valid hatch pattern.

    Raises
    ------
    ValueError
        If *s* is not a string, or contains characters outside the allowed set.
    """
    if not isinstance(s, str):
        raise ValueError("Hatch pattern must be a string")
    # NOTE: a duplicate `_api.check_isinstance(str, hatch_pattern=s)` type
    # check was removed here — it was unreachable, since non-strings already
    # raise ValueError above.
    unknown = set(s) - {'\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O'}
    if unknown:
        raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
    return s
# List variants of the hatch and dash validators, for use in property cycles.
validate_hatchlist = _listify_validator(validate_hatch)
validate_dashlist = _listify_validator(validate_floatlist)
def _validate_minor_tick_ndivs(n):
    """
    Validate the ndiv parameter related to the minor ticks.

    It controls the number of minor ticks to be placed between
    two major ticks; accepts 'auto' (case-insensitive) or a
    non-negative integer.
    """
    if cbook._str_lower_equal(n, 'auto'):
        return n
    try:
        return _validate_int_greaterequal0(n)
    except (RuntimeError, ValueError):
        raise ValueError(
            "'tick.minor.ndivs' must be 'auto' or non-negative int") from None
# Mapping from canonical artist property name to its validator; used by
# ``cycler`` below to validate the values supplied for each property cycle.
_prop_validators = {
    'color': _listify_validator(validate_color_for_prop_cycle,
                                allow_stringlist=True),
    'linewidth': validate_floatlist,
    'linestyle': _listify_validator(_validate_linestyle),
    'facecolor': validate_colorlist,
    'edgecolor': validate_colorlist,
    'joinstyle': _listify_validator(JoinStyle),
    'capstyle': _listify_validator(CapStyle),
    'fillstyle': validate_fillstylelist,
    'markerfacecolor': validate_colorlist,
    'markersize': validate_floatlist,
    'markeredgewidth': validate_floatlist,
    'markeredgecolor': validate_colorlist,
    'markevery': validate_markeverylist,
    'alpha': validate_floatlist,
    'marker': _validate_markerlist,
    'hatch': validate_hatchlist,
    'dashes': validate_dashlist,
}
# Short-form property aliases (as accepted by Artist setters), normalized to
# their canonical names before validator lookup in ``cycler``.
_prop_aliases = {
    'c': 'color',
    'lw': 'linewidth',
    'ls': 'linestyle',
    'fc': 'facecolor',
    'ec': 'edgecolor',
    'mfc': 'markerfacecolor',
    'mec': 'markeredgecolor',
    'mew': 'markeredgewidth',
    'ms': 'markersize',
}
def cycler(*args, **kwargs):
    """
    Create a `~cycler.Cycler` object much like :func:`cycler.cycler`,
    but includes input validation.

    Call signatures::

        cycler(cycler)
        cycler(label=values, label2=values2, ...)
        cycler(label, values)

    Form 1 copies a given `~cycler.Cycler` object.

    Form 2 creates a `~cycler.Cycler` which cycles over one or more
    properties simultaneously. If multiple properties are given, their
    value lists must have the same length.

    Form 3 creates a `~cycler.Cycler` for a single property. This form
    exists for compatibility with the original cycler. Its use is
    discouraged in favor of the kwarg form, i.e. ``cycler(label=values)``.

    Parameters
    ----------
    cycler : Cycler
        Copy constructor for Cycler.
    label : str
        The property key. Must be a valid `.Artist` property.
        For example, 'color' or 'linestyle'. Aliases are allowed,
        such as 'c' for 'color' and 'lw' for 'linewidth'.
    values : iterable
        Finite-length iterable of the property values. These values
        are validated and will raise a ValueError if invalid.

    Returns
    -------
    Cycler
        A new :class:`~cycler.Cycler` for the given properties.

    Examples
    --------
    Creating a cycler for a single property:

    >>> c = cycler(color=['red', 'green', 'blue'])

    Creating a cycler for simultaneously cycling over multiple properties
    (e.g. red circle, green plus, blue cross):

    >>> c = cycler(color=['red', 'green', 'blue'],
    ...            marker=['o', '+', 'x'])
    """
    if args and kwargs:
        raise TypeError("cycler() can only accept positional OR keyword "
                        "arguments -- not both.")
    if not args and not kwargs:
        raise TypeError("cycler() must have positional OR keyword arguments")
    nargs = len(args)
    if nargs == 1:
        # Form 1: copy-construct from an existing Cycler instance.
        if not isinstance(args[0], Cycler):
            raise TypeError("If only one positional argument given, it must "
                            "be a Cycler instance.")
        return validate_cycler(args[0])
    if nargs > 2:
        raise _api.nargs_error('cycler', '0-2', nargs)
    # Form 3 (a single label/values pair) or form 2 (keyword arguments).
    pairs = [(args[0], args[1])] if nargs == 2 else kwargs.items()
    validated = []
    for prop, vals in pairs:
        # Normalize aliases (e.g. 'lw' -> 'linewidth') so downstream code
        # only ever sees canonical property names.
        norm_prop = _prop_aliases.get(prop, prop)
        validator = _prop_validators.get(norm_prop, None)
        if validator is None:
            raise TypeError("Unknown artist property: %s" % prop)
        validated.append((norm_prop, validator(vals)))
    return reduce(operator.add, (ccycler(k, v) for k, v in validated))
| ValidateInStrings |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/llm/multiplex/test_lora_model_loader.py | {
"start": 363,
"end": 7260
} | class ____:
"""Test suite for the LoraModelLoader class."""
@pytest.fixture
def model_loader(self):
"""Provides a LoraModelLoader instance for tests."""
return LoraModelLoader("/tmp/ray/lora/cache", max_tries=3)
@pytest.fixture
def llm_config(self, disable_placement_bundles):
"""Common LLM config used across tests."""
return LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="llm_model_id"),
llm_engine=LLMEngine.vLLM,
accelerator_type="L4",
lora_config=LoraConfig(
dynamic_lora_loading_path="s3://fake-bucket-uri-abcd"
),
)
@pytest.fixture
def lora_model_id(self):
"""Common LoRA model ID used across tests."""
return "base_model:lora_id"
@pytest.fixture
def lora_mirror_config(self, lora_model_id):
"""Common LoRA mirror config used across tests."""
return LoraMirrorConfig(
lora_model_id=lora_model_id,
bucket_uri="s3://fake-bucket-uri-abcd",
max_total_tokens=4096,
)
@pytest.mark.asyncio
async def test_basic_loading(
self, model_loader, llm_config, lora_model_id, lora_mirror_config
):
"""Test basic model loading functionality."""
# Create a simple mock for sync_model
mock_sync_model = Mock()
with patch(
"ray.llm._internal.serve.utils.lora_serve_utils.sync_files_with_lock",
side_effect=mock_sync_model,
):
# First load should download the model
disk_multiplex_config = await model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
# Verify sync_files_with_lock was called with correct parameters
mock_sync_model.assert_called_once_with(
"s3://fake-bucket-uri-abcd",
"/tmp/ray/lora/cache/lora_id",
timeout=model_loader.download_timeout_s,
)
mock_sync_model.reset_mock()
# Second time we don't load from S3 - should use cache
new_disk_config = await model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
assert new_disk_config == disk_multiplex_config
mock_sync_model.assert_not_called()
@pytest.mark.asyncio
async def test_retry_logic(
self, model_loader, llm_config, lora_model_id, lora_mirror_config
):
"""Test that the lora model load task is properly retried on failure."""
# Counter to track number of sync_model calls
attempt_count = 0
# Create a mock for sync_files_with_lock that tracks calls and fails initially
def mock_sync_model(bucket_uri, local_path, timeout=None):
nonlocal attempt_count
attempt_count += 1
# Fail on first attempt, succeed on second
if attempt_count == 1:
raise RuntimeError("Simulated download failure")
# Success on subsequent attempts
return None
with patch(
"ray.llm._internal.serve.utils.lora_serve_utils.sync_files_with_lock",
side_effect=Mock(side_effect=mock_sync_model),
):
# First load should trigger a retry
disk_multiplex_config = await model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
# Verify retry happened exactly once
assert attempt_count == 2
# Reset counter
attempt_count = 0
# Load again (should use cache, no download attempts)
new_disk_config = await model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
# Verify no new download attempts
assert attempt_count == 0
# Verify cached config is returned
assert new_disk_config == disk_multiplex_config
@pytest.mark.asyncio
async def test_concurrent_loading(
self, model_loader, llm_config, lora_model_id, lora_mirror_config
):
"""Test that concurrent loads only trigger one download process."""
# Counter to track number of sync_model calls
attempt_count = 0
# Create a mock for sync_files_with_lock that tracks calls and fails initially
def mock_sync_model(bucket_uri, local_path, timeout=None):
nonlocal attempt_count
attempt_count += 1
# Fail on first attempt, succeed on second
if attempt_count == 1:
raise RuntimeError("Simulated download failure")
# Success on subsequent attempts
return None
with patch(
"ray.llm._internal.serve.utils.lora_serve_utils.sync_files_with_lock",
side_effect=Mock(side_effect=mock_sync_model),
):
# Clear cache to force download
model_loader.disk_cache.clear()
# Create multiple concurrent tasks
tasks = [
asyncio.create_task(
model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
)
for _ in range(3)
]
# Wait for all tasks to complete
results = await asyncio.gather(*tasks)
# Verify retry happened exactly once across all tasks
assert attempt_count == 2
# All tasks should return the same result
assert all(result == results[0] for result in results)
@pytest.mark.asyncio
async def test_max_retries_exhaustion(
self, model_loader, llm_config, lora_model_id, lora_mirror_config
):
"""Test that an error is raised when max retries are exhausted."""
# Mock that always fails
def mock_sync_model_always_fails(*args, **kwargs):
raise RuntimeError("Simulated persistent failure")
with patch(
"ray.llm._internal.serve.utils.lora_serve_utils.sync_files_with_lock",
side_effect=Mock(side_effect=mock_sync_model_always_fails),
):
# Should fail after max_tries (3) attempts
with pytest.raises(RuntimeError) as excinfo:
await model_loader.load_model(
lora_model_id=lora_model_id,
lora_mirror_config=lora_mirror_config,
)
assert "Simulated persistent failure" in str(excinfo.value)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestLoRAModelLoader |
python | doocs__leetcode | solution/3000-3099/3071.Minimum Operations to Write the Letter Y on a Grid/Solution.py | {
"start": 0,
"end": 614
} | class ____:
def minimumOperationsToWriteY(self, grid: List[List[int]]) -> int:
n = len(grid)
cnt1 = Counter()
cnt2 = Counter()
for i, row in enumerate(grid):
for j, x in enumerate(row):
a = i == j and i <= n // 2
b = i + j == n - 1 and i <= n // 2
c = j == n // 2 and i >= n // 2
if a or b or c:
cnt1[x] += 1
else:
cnt2[x] += 1
return min(
n * n - cnt1[i] - cnt2[j] for i in range(3) for j in range(3) if i != j
)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 1282,
"end": 1482
} | class ____(ClassA):
def method5(self):
class ClassDInner(super().method5()):
# This should generate an error.
x = super().method5()
return ClassDInner
| ClassE |
python | lepture__authlib | authlib/oidc/core/grants/hybrid.py | {
"start": 373,
"end": 3430
} | class ____(OpenIDImplicitGrant):
#: Generated "code" length
AUTHORIZATION_CODE_LENGTH = 48
RESPONSE_TYPES = {"code id_token", "code token", "code id_token token"}
GRANT_TYPE = "code"
DEFAULT_RESPONSE_MODE = "fragment"
def generate_authorization_code(self):
""" "The method to generate "code" value for authorization code data.
Developers may rewrite this method, or customize the code length with::
class MyAuthorizationCodeGrant(AuthorizationCodeGrant):
AUTHORIZATION_CODE_LENGTH = 32 # default is 48
"""
return generate_token(self.AUTHORIZATION_CODE_LENGTH)
def save_authorization_code(self, code, request):
"""Save authorization_code for later use. Developers MUST implement
it in subclass. Here is an example::
def save_authorization_code(self, code, request):
client = request.client
auth_code = AuthorizationCode(
code=code,
client_id=client.client_id,
redirect_uri=request.payload.redirect_uri,
scope=request.payload.scope,
nonce=request.payload.data.get("nonce"),
user_id=request.user.id,
)
auth_code.save()
"""
raise NotImplementedError()
def validate_authorization_request(self):
if not is_openid_scope(self.request.payload.scope):
raise InvalidScopeError(
"Missing 'openid' scope",
redirect_uri=self.request.payload.redirect_uri,
redirect_fragment=True,
)
self.register_hook(
"after_validate_authorization_request_payload",
lambda grant, redirect_uri: validate_nonce(
grant.request, grant.exists_nonce, required=True
),
)
return validate_code_authorization_request(self)
def create_granted_params(self, grant_user):
self.request.user = grant_user
client = self.request.client
code = self.generate_authorization_code()
self.save_authorization_code(code, self.request)
params = [("code", code)]
token = self.generate_token(
grant_type="implicit",
user=grant_user,
scope=self.request.payload.scope,
include_refresh_token=False,
)
response_types = self.request.payload.response_type.split()
if "token" in response_types:
log.debug("Grant token %r to %r", token, client)
self.server.save_token(token, self.request)
if "id_token" in response_types:
token = self.process_implicit_token(token, code)
else:
# response_type is "code id_token"
token = {"expires_in": token["expires_in"], "scope": token["scope"]}
token = self.process_implicit_token(token, code)
params.extend([(k, token[k]) for k in token])
return params
| OpenIDHybridGrant |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/instance/methods/scheduling_methods.py | {
"start": 922,
"end": 18251
} | class ____:
"""Mixin class containing scheduling-related functionality for DagsterInstance.
This class provides methods for schedule, sensor, and backfill management.
All methods are implemented as instance methods that DagsterInstance inherits.
"""
@property
def _instance(self) -> "DagsterInstance":
"""Cast self to DagsterInstance for type-safe access to instance methods and properties."""
from dagster._core.instance.instance import DagsterInstance
return check.inst(self, DagsterInstance)
# Private member access wrappers with consolidated type: ignore
@property
def _scheduler_impl(self):
"""Access to scheduler."""
return self._instance._scheduler # noqa: SLF001
@property
def _schedule_storage_impl(self):
"""Access to schedule storage."""
return self._instance._schedule_storage # noqa: SLF001
@property
def _run_storage_impl(self):
"""Access to run storage."""
return self._instance._run_storage # noqa: SLF001
# Public properties for schedule/sensor storage - moved from InstigatorMixin
@property
def schedule_storage(self) -> Optional["ScheduleStorage"]:
"""Get schedule storage."""
return self._schedule_storage_impl
@property
def scheduler(self) -> Optional["Scheduler"]:
"""Get scheduler."""
return self._scheduler_impl
@property
def scheduler_class(self) -> Optional[str]:
"""Get scheduler class name."""
return self.scheduler.__class__.__name__ if self.scheduler else None
def schedules_directory(self) -> str:
"""Get schedules directory - delegates to StorageMethods."""
# Access the method from StorageMethods mixin
from dagster._core.instance.methods.storage_methods import StorageMethods
return StorageMethods.schedules_directory(self._instance)
def start_schedule(self, remote_schedule: "RemoteSchedule") -> "InstigatorState":
"""Start schedule - moved from DagsterInstance.start_schedule()."""
if not self._scheduler_impl:
check.failed("Scheduler not available")
return self._scheduler_impl.start_schedule(self._instance, remote_schedule)
def stop_schedule(
self,
schedule_origin_id: str,
schedule_selector_id: str,
remote_schedule: Optional["RemoteSchedule"] = None,
) -> "InstigatorState":
"""Stop schedule - moved from DagsterInstance.stop_schedule()."""
if not self._scheduler_impl:
check.failed("Scheduler not available")
return self._scheduler_impl.stop_schedule(
self._instance, schedule_origin_id, schedule_selector_id, remote_schedule
)
def reset_schedule(self, remote_schedule: "RemoteSchedule") -> "InstigatorState":
"""Reset schedule - moved from DagsterInstance.reset_schedule()."""
if not self._scheduler_impl:
check.failed("Scheduler not available")
return self._scheduler_impl.reset_schedule(self._instance, remote_schedule)
def start_sensor(self, remote_sensor: "RemoteSensor") -> "InstigatorState":
"""Start sensor - moved from DagsterInstance.start_sensor()."""
from typing import cast
from dagster._core.definitions.run_request import InstigatorType
from dagster._core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
SensorInstigatorData,
)
from dagster._time import get_current_timestamp
stored_state = self.get_instigator_state(
remote_sensor.get_remote_origin_id(), remote_sensor.selector_id
)
computed_state = remote_sensor.get_current_instigator_state(stored_state)
if computed_state.is_running:
return computed_state
if not stored_state:
return self.add_instigator_state(
InstigatorState(
remote_sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.RUNNING,
SensorInstigatorData(
min_interval=remote_sensor.min_interval_seconds,
last_sensor_start_timestamp=get_current_timestamp(),
sensor_type=remote_sensor.sensor_type,
),
)
)
else:
data = cast("SensorInstigatorData", stored_state.instigator_data)
return self.update_instigator_state(
stored_state.with_status(InstigatorStatus.RUNNING).with_data(
data.with_sensor_start_timestamp(get_current_timestamp())
)
)
def stop_sensor(
self,
instigator_origin_id: str,
selector_id: str,
remote_sensor: Optional["RemoteSensor"],
) -> "InstigatorState":
"""Stop sensor - moved from DagsterInstance.stop_sensor()."""
import dagster._check as check
from dagster._core.definitions.run_request import InstigatorType
from dagster._core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
SensorInstigatorData,
)
stored_state = self.get_instigator_state(instigator_origin_id, selector_id)
computed_state: InstigatorState
if remote_sensor:
computed_state = remote_sensor.get_current_instigator_state(stored_state)
else:
computed_state = check.not_none(stored_state)
if not computed_state.is_running:
return computed_state
if not stored_state:
assert remote_sensor
return self.add_instigator_state(
InstigatorState(
remote_sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.STOPPED,
SensorInstigatorData(
min_interval=remote_sensor.min_interval_seconds,
sensor_type=remote_sensor.sensor_type,
),
)
)
else:
return self.update_instigator_state(stored_state.with_status(InstigatorStatus.STOPPED))
def reset_sensor(self, remote_sensor: "RemoteSensor") -> "InstigatorState":
"""Reset sensor - moved from DagsterInstance.reset_sensor()."""
from dagster._core.definitions.run_request import InstigatorType
from dagster._core.scheduler.instigation import (
InstigatorState,
InstigatorStatus,
SensorInstigatorData,
)
stored_state = self.get_instigator_state(
remote_sensor.get_remote_origin_id(), remote_sensor.selector_id
)
new_status = InstigatorStatus.DECLARED_IN_CODE
if not stored_state:
new_instigator_data = SensorInstigatorData(
min_interval=remote_sensor.min_interval_seconds,
sensor_type=remote_sensor.sensor_type,
)
reset_state = self.add_instigator_state(
state=InstigatorState(
remote_sensor.get_remote_origin(),
InstigatorType.SENSOR,
new_status,
new_instigator_data,
)
)
else:
reset_state = self.update_instigator_state(state=stored_state.with_status(new_status))
return reset_state
@traced
def all_instigator_state(
self,
repository_origin_id: Optional[str] = None,
repository_selector_id: Optional[str] = None,
instigator_type: Optional["InstigatorType"] = None,
instigator_statuses: Optional[set["InstigatorStatus"]] = None,
) -> Sequence["InstigatorState"]:
"""Get all instigator states - moved from DagsterInstance.all_instigator_state()."""
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
return self._schedule_storage_impl.all_instigator_state(
repository_origin_id=repository_origin_id,
repository_selector_id=repository_selector_id,
instigator_type=instigator_type,
instigator_statuses=instigator_statuses,
)
def get_instigator_state(self, origin_id: str, selector_id: str) -> Optional["InstigatorState"]:
"""Get instigator state - moved from DagsterInstance.get_instigator_state()."""
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
return self._schedule_storage_impl.get_instigator_state(origin_id, selector_id)
def add_instigator_state(self, state: "InstigatorState") -> "InstigatorState":
"""Add instigator state - moved from DagsterInstance.add_instigator_state()."""
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
return self._schedule_storage_impl.add_instigator_state(state)
def update_instigator_state(self, state: "InstigatorState") -> "InstigatorState":
"""Update instigator state - moved from DagsterInstance.update_instigator_state()."""
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
return self._schedule_storage_impl.update_instigator_state(state)
def delete_instigator_state(self, origin_id: str, selector_id: str) -> None:
"""Delete instigator state - moved from DagsterInstance.delete_instigator_state()."""
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
return self._schedule_storage_impl.delete_instigator_state(origin_id, selector_id)
def get_backfills(
self,
filters: Optional["BulkActionsFilter"] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
status: Optional["BulkActionStatus"] = None,
) -> Sequence["PartitionBackfill"]:
"""Get backfills - moved from DagsterInstance.get_backfills()."""
return self._run_storage_impl.get_backfills(
status=status, cursor=cursor, limit=limit, filters=filters
)
def get_backfills_count(self, filters: Optional["BulkActionsFilter"] = None) -> int:
"""Get backfills count - moved from DagsterInstance.get_backfills_count()."""
return self._run_storage_impl.get_backfills_count(filters=filters)
def get_backfill(self, backfill_id: str) -> Optional["PartitionBackfill"]:
"""Get backfill - moved from DagsterInstance.get_backfill()."""
return self._run_storage_impl.get_backfill(backfill_id)
def add_backfill(self, partition_backfill: "PartitionBackfill") -> None:
"""Add backfill - moved from DagsterInstance.add_backfill()."""
self._run_storage_impl.add_backfill(partition_backfill)
def update_backfill(self, partition_backfill: "PartitionBackfill") -> None:
"""Update backfill - moved from DagsterInstance.update_backfill()."""
self._run_storage_impl.update_backfill(partition_backfill)
def wipe_all_schedules(self) -> None:
"""Wipe all schedules - moved from DagsterInstance.wipe_all_schedules()."""
if self._scheduler_impl:
self._scheduler_impl.wipe(self._instance) # type: ignore
if not self._schedule_storage_impl:
check.failed("Schedule storage not available")
self._schedule_storage_impl.wipe()
def logs_path_for_schedule(self, schedule_origin_id: str) -> str:
"""Get logs path for schedule - moved from DagsterInstance.logs_path_for_schedule()."""
if not self._scheduler_impl:
check.failed("Scheduler not available")
return self._scheduler_impl.get_logs_path(self._instance, schedule_origin_id)
def scheduler_debug_info(self) -> "SchedulerDebugInfo":
"""Get scheduler debug info - moved from DagsterInstance.scheduler_debug_info()."""
from collections.abc import Mapping
from dagster._core.definitions.run_request import InstigatorType
from dagster._core.scheduler import SchedulerDebugInfo
errors = []
schedules: list[str] = []
for schedule_state in self.all_instigator_state(instigator_type=InstigatorType.SCHEDULE):
schedule_info: Mapping[str, Mapping[str, object]] = {
schedule_state.instigator_name: {
"status": schedule_state.status.value,
"repository_origin_id": schedule_state.repository_origin_id,
"schedule_origin_id": schedule_state.instigator_origin_id,
"cron_schedule": getattr(schedule_state.instigator_data, "cron_schedule", None)
if schedule_state.instigator_data
else None,
}
}
schedules.append(str(schedule_info))
sensors: list[str] = []
for sensor_state in self.all_instigator_state(instigator_type=InstigatorType.SENSOR):
sensor_info: Mapping[str, Mapping[str, object]] = {
sensor_state.instigator_name: {
"status": sensor_state.status.value,
"repository_origin_id": sensor_state.repository_origin_id,
"sensor_origin_id": sensor_state.instigator_origin_id,
}
}
sensors.append(str(sensor_info))
return SchedulerDebugInfo(
errors=errors,
scheduler_config_info=self._instance.scheduler_class or "",
scheduler_info=self._instance.scheduler.debug_info()
if self._instance.scheduler
else "",
schedule_storage=schedules + sensors,
)
def get_tick_retention_settings(
self, instigator_type: "InstigatorType"
) -> Mapping["TickStatus", int]:
"""Get tick retention settings - moved from DagsterInstance.get_tick_retention_settings()."""
from dagster._core.definitions.run_request import InstigatorType
from dagster._core.instance.config import (
get_default_tick_retention_settings,
get_tick_retention_settings,
)
retention_settings = self._instance.get_settings("retention")
if instigator_type == InstigatorType.SCHEDULE:
tick_settings = retention_settings.get("schedule")
elif instigator_type == InstigatorType.SENSOR:
tick_settings = retention_settings.get("sensor")
elif instigator_type == InstigatorType.AUTO_MATERIALIZE:
tick_settings = retention_settings.get("auto_materialize")
else:
raise Exception(f"Unexpected instigator type {instigator_type}")
default_tick_settings = get_default_tick_retention_settings(instigator_type)
return get_tick_retention_settings(tick_settings, default_tick_settings)
# Tick operations - moved from InstigatorMixin
@property
def supports_batch_tick_queries(self) -> bool:
return bool(
self._schedule_storage_impl and self._schedule_storage_impl.supports_batch_queries
)
@traced
def get_batch_ticks(
self,
selector_ids: Sequence[str],
limit: Optional[int] = None,
statuses: Optional[Sequence["TickStatus"]] = None,
) -> Mapping[str, Sequence["InstigatorTick"]]:
if not self._schedule_storage_impl:
return {}
return self._schedule_storage_impl.get_batch_ticks(selector_ids, limit, statuses)
@traced
def get_tick(
self, origin_id: str, selector_id: str, timestamp: float
) -> Optional["InstigatorTick"]:
if not self._schedule_storage_impl:
return None
matches = self._schedule_storage_impl.get_ticks(
origin_id, selector_id, before=timestamp + 1, after=timestamp - 1, limit=1
)
return matches[0] if len(matches) else None
@traced
def get_ticks(
self,
origin_id: str,
selector_id: str,
before: Optional[float] = None,
after: Optional[float] = None,
limit: Optional[int] = None,
statuses: Optional[Sequence["TickStatus"]] = None,
) -> Sequence["InstigatorTick"]:
if not self._schedule_storage_impl:
return []
return self._schedule_storage_impl.get_ticks(
origin_id,
selector_id,
before=before,
after=after,
limit=limit,
statuses=statuses,
)
def create_tick(self, tick_data: "TickData") -> "InstigatorTick":
return check.not_none(self._schedule_storage_impl).create_tick(tick_data)
def update_tick(self, tick: "InstigatorTick"):
return check.not_none(self._schedule_storage_impl).update_tick(tick)
def purge_ticks(
self,
origin_id: str,
selector_id: str,
before: float,
tick_statuses: Optional[Sequence["TickStatus"]] = None,
) -> None:
if self._schedule_storage_impl:
self._schedule_storage_impl.purge_ticks(origin_id, selector_id, before, tick_statuses)
def get_tick_termination_check_interval(self) -> Optional[int]:
return None
| SchedulingMethods |
python | huggingface__transformers | src/transformers/models/janus/image_processing_janus_fast.py | {
"start": 1224,
"end": 8818
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
min_size = 14
do_resize = True
do_rescale = True
do_normalize = True
do_pad = True
valid_kwargs = JanusImageProcessorKwargs
def __init__(self, **kwargs: Unpack[JanusImageProcessorKwargs]):
super().__init__(**kwargs)
if kwargs.get("image_mean") is None:
background_color = (127, 127, 127)
else:
background_color = tuple(int(x * 255) for x in kwargs.get("image_mean"))
self.background_color = tuple(background_color)
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
min_size: int,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
if size.height is None or size.width is None or size.height != size.width:
raise ValueError(
f"Output height and width must be the same. Got height={size['height']} and width={size['width']}"
)
size = size.height
height, width = image.shape[-2:]
max_size = max(height, width)
delta = size / max_size
# Largest side becomes `size` and the other side is scaled according to the aspect ratio.
output_size_nonpadded = SizeDict(
height=max(int(height * delta), min_size),
width=max(int(width * delta), min_size),
)
return super().resize(image, size=output_size_nonpadded, interpolation=interpolation, antialias=antialias)
def pad_to_square(
self,
images: "torch.Tensor",
background_color: Union[int, tuple[int, int, int]] = 0,
) -> "torch.Tensor":
"""
Pads an image to a square based on the longest edge.
Args:
images (`torch.Tensor`):
The images to pad.
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
The color to use for the padding. Can be an integer for single channel or a
tuple of integers representing for multi-channel images. If passed as integer
in multi-channel mode, it will default to `0` in subsequent channels.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = images.shape[-2:]
num_channels = images.shape[1]
batch_size = images.shape[0]
if height == width:
return images
max_dim = max(height, width)
# Ensure background_color is the correct shape
if isinstance(background_color, int):
background_color = [background_color]
elif len(background_color) != num_channels:
raise ValueError(
f"background_color must have no more than {num_channels} elements to match the number of channels"
)
padded_images = torch.zeros(
(batch_size, num_channels, max_dim, max_dim), dtype=images.dtype, device=images.device
)
for i, color in enumerate(background_color):
padded_images[:, i, :, :] = color
if width > height:
start = (max_dim - height) // 2
padded_images[:, :, start : start + height, :] = images
else:
start = (max_dim - width) // 2
padded_images[:, :, :, start : start + width] = images
return padded_images
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
min_size: int,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
do_pad: bool = True,
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(
image=stacked_images, size=size, min_size=min_size, interpolation=interpolation
)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_pad:
stacked_images = self.pad_to_square(stacked_images, background_color=self.background_color)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def postprocess(
self,
images: ImageInput,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[list[float]] = None,
image_std: Optional[list[float]] = None,
return_tensors: Optional[str] = None,
) -> "torch.Tensor":
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
image_mean = tuple(-rescale_factor * mean / std for mean, std in zip(image_mean, image_std))
image_std = tuple(1 / std for std in image_std)
images = self.preprocess(
images,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=False,
do_pad=False,
return_tensors=return_tensors,
).pixel_values
if do_rescale:
images = [image.clip(0, 255).to(torch.uint8) for image in images]
if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
images = [F.to_pil_image(image) for image in images]
data = {"pixel_values": images}
return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["JanusImageProcessorFast"]
| JanusImageProcessorFast |
python | kamyu104__LeetCode-Solutions | Python/maximum-length-substring-with-two-occurrences.py | {
"start": 78,
"end": 754
} | class ____(object):
def maximumLengthSubstring(self, s):
"""
:type s: str
:rtype: int
"""
COUNT = 2
result = 0
cnt = [0]*26
left = invalid_cnt = 0
for right, x in enumerate(s):
if cnt[ord(x)-ord('a')] == COUNT:
invalid_cnt += 1
cnt[ord(x)-ord('a')] += 1
if invalid_cnt:
cnt[ord(s[left])-ord('a')] -= 1
if cnt[ord(s[left])-ord('a')] == COUNT:
invalid_cnt -= 1
left += 1
return right-left+1
# Time: O(n + 26)
# Space: O(26)
# freq table, sliding window, two pointers
| Solution |
python | oauthlib__oauthlib | oauthlib/oauth1/rfc5849/endpoints/resource.py | {
"start": 323,
"end": 7374
} | class ____(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
v, r = provider.validate_protected_resource_request(
request.url,
http_method=request.method,
body=request.data,
headers=request.headers,
realms=realms or [])
if v:
return f(*args, **kwargs)
else:
return abort(403)
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
# it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
# flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['realm'] = valid_realm
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
| ResourceEndpoint |
python | skorch-dev__skorch | skorch/tests/test_regressor.py | {
"start": 247,
"end": 7390
} | class ____:
@pytest.fixture(scope='module')
def data(self, regression_data):
return regression_data
@pytest.fixture(scope='module')
def module_cls(self):
from skorch.toy import make_regressor
return make_regressor(dropout=0.5)
@pytest.fixture(scope='module')
def module_pred_1d_cls(self):
from skorch.toy import MLPModule
# Module that returns 1d predictions
return partial(MLPModule, output_units=1, squeeze_output=True)
@pytest.fixture(scope='module')
def net_cls(self):
from skorch import NeuralNetRegressor
return NeuralNetRegressor
@pytest.fixture(scope='module')
def net(self, net_cls, module_cls):
return net_cls(
module_cls,
max_epochs=20,
lr=0.1,
)
@pytest.fixture(scope='module')
def multioutput_module_cls(self):
from skorch.toy import make_regressor
return make_regressor(output_units=3, dropout=0.5)
@pytest.fixture(scope='module')
def multioutput_net(self, net_cls, multioutput_module_cls):
return net_cls(
multioutput_module_cls,
max_epochs=1,
lr=0.1,
)
@pytest.fixture(scope='module')
def net_fit(self, net, data):
# Careful, don't call additional fits on this, since that would have
# side effects on other tests.
X, y = data
return net.fit(X, y)
def test_clone(self, net_fit):
clone(net_fit)
def test_fit(self, net_fit, recwarn):
# fitting does not raise anything and does not warn
assert not recwarn.list
@pytest.mark.parametrize('method', INFERENCE_METHODS)
def test_not_init_raises(self, net_cls, module_cls, data, method):
from skorch.exceptions import NotInitializedError
net = net_cls(module_cls)
X = data[0]
with pytest.raises(NotInitializedError) as exc:
# we call `list` because `forward_iter` is lazy
list(getattr(net, method)(X))
msg = ("This NeuralNetRegressor instance is not initialized "
"yet. Call 'initialize' or 'fit' with appropriate arguments "
"before using this method.")
assert exc.value.args[0] == msg
def test_not_fitted_raises(self, net_cls, module_cls):
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
net = net_cls(module_cls)
with pytest.raises(NotFittedError) as exc:
check_is_fitted(net)
msg = (
"This NeuralNetRegressor instance is not fitted yet. "
"Call 'fit' with appropriate arguments before "
"using this estimator."
)
assert exc.value.args[0] == msg
def test_net_learns(self, net, net_cls, data, module_cls):
X, y = data
net = net_cls(
module_cls,
max_epochs=10,
lr=0.1,
)
net.fit(X, y)
train_losses = net.history[:, 'train_loss']
assert train_losses[0] > 2 * train_losses[-1]
def test_history_default_keys(self, net_fit):
expected_keys = {'train_loss', 'valid_loss', 'epoch', 'dur', 'batches'}
for row in net_fit.history:
assert expected_keys.issubset(row)
def test_predict_predict_proba(self, net_fit, data):
X = data[0]
y_pred = net_fit.predict(X)
# predictions should not be all zeros
assert not np.allclose(y_pred, 0)
y_proba = net_fit.predict_proba(X)
# predict and predict_proba should be identical for regression
assert np.allclose(y_pred, y_proba, atol=1e-6)
def test_score(self, net_fit, data):
X, y = data
r2_score = net_fit.score(X, y)
assert r2_score <= 1.
def test_multioutput_score(self, multioutput_net, multioutput_regression_data):
X, y = multioutput_regression_data
multioutput_net.fit(X, y)
r2_score = multioutput_net.score(X, y)
assert r2_score <= 1.
def test_dimension_mismatch_warning(self, net_cls, module_cls, data, recwarn):
# When the target and the prediction have different dimensionality, mse
# loss will broadcast them, calculating all pairwise errors instead of
# only sample-wise. Since the errors are averaged at the end, there is
# still a valid loss, which makes the error hard to spot. Thankfully,
# torch gives a warning in that case. We test that this warning exists,
# otherwise, skorch users could run into very hard to debug issues
# during training.
net = net_cls(module_cls)
X, y = data
X, y = X[:100], y[:100].flatten() # make y 1d
net.fit(X, y)
# The warning comes from PyTorch, so checking the exact wording is prone to
# error in future PyTorch versions. We thus check a substring of the
# whole message and cross our fingers that it's not changed.
msg_substr = (
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size"
)
warn_list = [w for w in recwarn.list if msg_substr in str(w.message)]
# one warning for train, one for valid
assert len(warn_list) == 2
def test_fitting_with_1d_target_and_pred(
self, net_cls, module_cls, data, module_pred_1d_cls, recwarn
):
# This test relates to the previous one. In general, users should fit
# with target and prediction being 2d, even if the 2nd dimension is just
# 1. However, in some circumstances (like when using BaggingRegressor,
# see next test), having the ability to fit with 1d is required. In that
# case, the module output also needs to be 1d for correctness.
X, y = data
X, y = X[:100], y[:100] # less data to run faster
y = y.flatten()
net = net_cls(module_pred_1d_cls)
net.fit(X, y)
msg_substr = (
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size"
)
assert not any(msg_substr in str(w.message) for w in recwarn.list)
def test_bagging_regressor(
self, net_cls, module_cls, data, module_pred_1d_cls, recwarn
):
# https://github.com/skorch-dev/skorch/issues/972
from sklearn.ensemble import BaggingRegressor
net = net_cls(module_pred_1d_cls) # module output should be 1d too
X, y = data
X, y = X[:100], y[:100] # less data to run faster
y = y.flatten() # make y 1d or else sklearn will complain
regr = BaggingRegressor(net, n_estimators=2, random_state=0)
regr.fit(X, y) # does not raise
# ensure there is no broadcast warning from torch
msg_substr = (
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size"
)
assert not any(msg_substr in str(w.message) for w in recwarn.list)
| TestNeuralNetRegressor |
python | ray-project__ray | python/ray/train/xgboost/config.py | {
"start": 4334,
"end": 6980
} | class ____(Backend):
def __init__(self):
self._tracker: Optional[RabitTracker] = None
def _setup_xgboost_distributed_backend(self, worker_group: BaseWorkerGroup):
# Set up the rabit tracker on the Train driver.
num_workers = len(worker_group)
rabit_args = {"DMLC_NUM_WORKER": num_workers}
train_driver_ip = ray.util.get_node_ip_address()
# NOTE: sortby="task" is needed to ensure that the xgboost worker ranks
# align with Ray Train worker ranks.
# The worker ranks will be sorted by `DMLC_TASK_ID`,
# which is defined below.
self._tracker = RabitTracker(
n_workers=num_workers, host_ip=train_driver_ip, sortby="task"
)
self._tracker.start(n_workers=num_workers)
worker_args = self._tracker.worker_envs()
rabit_args.update(worker_args)
start_log = (
"RabitTracker coordinator started with parameters:\n"
f"{json.dumps(rabit_args, indent=2)}"
)
logger.debug(start_log)
def set_xgboost_env_vars():
import ray.train
for k, v in rabit_args.items():
os.environ[k] = str(v)
# Ranks are assigned in increasing order of the worker's task id.
# This task id will be sorted by increasing world rank.
os.environ["DMLC_TASK_ID"] = (
f"[xgboost.ray-rank={ray.train.get_context().get_world_rank():08}]:"
f"{ray.get_runtime_context().get_actor_id()}"
)
worker_group.execute(set_xgboost_env_vars)
def on_training_start(
self, worker_group: BaseWorkerGroup, backend_config: XGBoostConfig
):
assert backend_config.xgboost_communicator == "rabit"
self._setup_xgboost_distributed_backend(worker_group)
def on_shutdown(self, worker_group: BaseWorkerGroup, backend_config: XGBoostConfig):
if not self._tracker:
return
timeout = 5
self._tracker.thread.join(timeout=timeout)
if self._tracker.thread.is_alive():
logger.warning(
"During shutdown, the RabitTracker thread failed to join "
f"within {timeout} seconds. "
"The process will still be terminated as part of Ray actor cleanup."
)
_xgboost_args: dict = {}
_xgboost_args_lock = threading.Lock()
def _set_xgboost_args(args):
with _xgboost_args_lock:
global _xgboost_args
_xgboost_args = args
def _get_xgboost_args() -> dict:
with _xgboost_args_lock:
return _xgboost_args
| _XGBoostRabitBackend_pre_xgb210 |
python | jazzband__django-waffle | waffle/apps.py | {
"start": 36,
"end": 258
} | class ____(AppConfig):
name = 'waffle'
verbose_name = 'django-waffle'
default_auto_field = 'django.db.models.AutoField'
def ready(self) -> None:
import waffle.signals # noqa: F401,PLC0415
| WaffleConfig |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 99346,
"end": 102709
} | class ____(fixtures.TestBase):
"""test #7594.
failure modes when INSERT doesn't actually insert a row.
s
"""
# the test manipulates INSERTS to become UPDATES to simulate
# "INSERT that returns no row" so both are needed; the manipulations
# are currently postgresql or SQLite specific
__sparse_driver_backend__ = True
__only_on__ = ("postgresql", "sqlite")
@testing.fixture
def null_server_default_fixture(self, registry, connection):
@registry.mapped
class MyClass:
__tablename__ = "my_table"
id = Column(Integer, primary_key=True)
data = Column(String(50))
registry.metadata.create_all(connection)
@event.listens_for(connection, "before_cursor_execute", retval=True)
def revert_insert(
conn, cursor, statement, parameters, context, executemany
):
if re.match(r"INSERT.* RETURNING (?:my_table.)?id", statement):
if executemany and isinstance(parameters, list):
# remove some rows, so the count is wrong
parameters = parameters[0:1]
else:
# statement should return no rows
statement = (
"UPDATE my_table SET id=NULL WHERE 1!=1 "
"RETURNING my_table.id"
)
parameters = {}
else:
assert not testing.against(
"postgresql"
), "this test has to at least run on PostgreSQL"
testing.config.skip_test(
"backend doesn't support the expected form of "
"RETURNING for this test to work"
)
return statement, parameters
return MyClass
@testing.only_on(
"postgresql",
"only postgresql uses RETURNING for a single-row "
"INSERT among the DBs we are using in this test",
)
def test_insert_single_no_pk_correct_exception(
self, null_server_default_fixture, connection
):
MyClass = null_server_default_fixture
sess = fixture_session(bind=connection)
m1 = MyClass(data="data")
sess.add(m1)
with expect_raises_message(
orm_exc.FlushError,
"Single-row INSERT statement for .*MyClass.* did not produce",
):
sess.flush()
is_true(inspect(m1).transient)
sess.rollback()
is_true(inspect(m1).transient)
def test_insert_multi_no_pk_correct_exception(
self, null_server_default_fixture, connection
):
MyClass = null_server_default_fixture
sess = fixture_session(bind=connection)
m1, m2, m3 = MyClass(data="d1"), MyClass(data="d2"), MyClass(data="d3")
sess.add_all([m1, m2, m3])
is_multi_row = connection.dialect.insert_executemany_returning
with expect_raises_message(
orm_exc.FlushError,
"%s INSERT statement for .*MyClass.* did not produce"
% ("Multi-row" if is_multi_row else "Single-row"),
):
sess.flush()
for m in m1, m2, m3:
is_true(inspect(m).transient)
sess.rollback()
for m in m1, m2, m3:
is_true(inspect(m).transient)
| NoRowInsertedTest |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 37484,
"end": 37974
} | class ____(CheckTestCase):
def test_not_boolean(self):
class TestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'save_as' must be a boolean.",
"admin.E101",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(TestModelAdmin, ValidationTestModel)
| SaveAsCheckTests |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 6912,
"end": 8214
} | class ____(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
'suitepostcondition', 'testsetup', 'testprecondition',
'testteardown', 'testpostcondition', 'testtemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
'testtimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
Tokenizer.__init__(self)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 1 and self._template_setter:
self._template_setter(value)
if index == 0:
normalized = normalize(value)
if normalized in self._keyword_settings:
self._custom_tokenizer = KeywordCall(support_assign=False)
elif normalized in self._import_settings:
self._custom_tokenizer = ImportSetting()
elif normalized not in self._other_settings:
return ERROR
elif self._custom_tokenizer:
return self._custom_tokenizer.tokenize(value)
return Tokenizer._tokenize(self, value, index)
| Setting |
python | scrapy__scrapy | tests/mockserver/http.py | {
"start": 3149,
"end": 3293
} | class ____(BaseMockServer):
module_name = "tests.mockserver.http"
main = main_factory(Root)
if __name__ == "__main__":
main()
| MockServer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 524527,
"end": 525186
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("duration", "id", "start_date", "title", "title_html")
duration = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="duration")
id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="id")
start_date = sgqlc.types.Field(sgqlc.types.non_null(Date), graphql_name="startDate")
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
title_html = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="titleHTML"
)
| ProjectV2IterationFieldIteration |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 13006,
"end": 13048
} | class ____(Argument):
pass
| InputArgument |
python | simplejson__simplejson | simplejson/encoder.py | {
"start": 15003,
"end": 29093
} | class ____(JSONEncoder):
"""An encoder that produces JSON safe to embed in HTML.
To embed JSON content in, say, a script tag on a web page, the
characters &, < and > should be escaped. They cannot be escaped
with the usual entities (e.g. &) because they are not expanded
within <script> tags.
This class also escapes the line separator and paragraph separator
characters U+2028 and U+2029, irrespective of the ensure_ascii setting,
as these characters are not valid in JavaScript strings (see
http://timelessrepo.com/json-isnt-a-javascript-subset).
"""
def encode(self, o):
# Override JSONEncoder.encode because it has hacks for
# performance that make things more complicated.
chunks = self.iterencode(o)
if self.ensure_ascii:
return ''.join(chunks)
else:
return u''.join(chunks)
def iterencode(self, o):
chunks = super(JSONEncoderForHTML, self).iterencode(o)
for chunk in chunks:
chunk = chunk.replace('&', '\\u0026')
chunk = chunk.replace('<', '\\u003c')
chunk = chunk.replace('>', '\\u003e')
if not self.ensure_ascii:
chunk = chunk.replace(u'\u2028', '\\u2028')
chunk = chunk.replace(u'\u2029', '\\u2029')
yield chunk
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys,
_use_decimal, _namedtuple_as_object, _tuple_as_array,
_int_as_string_bitcount, _item_sort_key,
_encoding,_for_json,
_iterable_as_array,
## HACK: hand-optimized bytecode; turn globals into locals
_PY3=PY3,
ValueError=ValueError,
string_types=string_types,
Decimal=None,
dict=dict,
float=float,
id=id,
integer_types=integer_types,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
iter=iter,
):
if _use_decimal and Decimal is None:
Decimal = decimal.Decimal
if _item_sort_key and not callable(_item_sort_key):
raise TypeError("item_sort_key must be None or callable")
elif _sort_keys and not _item_sort_key:
_item_sort_key = itemgetter(0)
if (_int_as_string_bitcount is not None and
(_int_as_string_bitcount <= 0 or
not isinstance(_int_as_string_bitcount, integer_types))):
raise TypeError("int_as_string_bitcount must be a positive integer")
def call_method(obj, method_name):
method = getattr(obj, method_name, None)
if callable(method):
try:
return (method(),)
except TypeError:
pass
return None
def _encode_int(value):
skip_quoting = (
_int_as_string_bitcount is None
or
_int_as_string_bitcount < 1
)
if type(value) not in integer_types:
# See #118, do not trust custom str/repr
value = int(value)
if (
skip_quoting or
(-1 << _int_as_string_bitcount)
< value <
(1 << _int_as_string_bitcount)
):
return str(value)
return '"' + str(value) + '"'
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, string_types):
yield buf + _encoder(value)
elif _PY3 and isinstance(value, bytes) and _encoding is not None:
yield buf + _encoder(value)
elif isinstance(value, RawJSON):
yield buf + value.encoded_json
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, integer_types):
yield buf + _encode_int(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield buf + str(value)
else:
yield buf
for_json = _for_json and call_method(value, 'for_json')
if for_json:
chunks = _iterencode(for_json[0], _current_indent_level)
elif isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and call_method(value, '_asdict')
if _asdict:
dct = _asdict[0]
if not isinstance(dct, dict):
raise TypeError("_asdict() must return a dict, not %s" % (type(dct).__name__,))
chunks = _iterencode_dict(dct,
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if first:
# iterable_as_array misses the fast path at the top
yield '[]'
else:
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield ']'
if markers is not None:
del markers[markerid]
def _stringify_key(key):
if isinstance(key, string_types): # pragma: no cover
pass
elif _PY3 and isinstance(key, bytes) and _encoding is not None:
key = str(key, _encoding)
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, integer_types):
if type(key) not in integer_types:
# See #118, do not trust custom str/repr
key = int(key)
key = str(key)
elif _use_decimal and isinstance(key, Decimal):
key = str(key)
elif _skipkeys:
key = None
else:
raise TypeError('keys must be str, int, float, bool or None, '
'not %s' % key.__class__.__name__)
return key
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (_indent * _current_indent_level)
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _PY3:
iteritems = dct.items()
else:
iteritems = dct.iteritems()
if _item_sort_key:
items = []
for k, v in dct.items():
if not isinstance(k, string_types):
k = _stringify_key(k)
if k is None:
continue
items.append((k, v))
items.sort(key=_item_sort_key)
else:
items = iteritems
for key, value in items:
if not (_item_sort_key or isinstance(key, string_types)):
key = _stringify_key(key)
if key is None:
# _skipkeys must be True
continue
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, string_types):
yield _encoder(value)
elif _PY3 and isinstance(value, bytes) and _encoding is not None:
yield _encoder(value)
elif isinstance(value, RawJSON):
yield value.encoded_json
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, integer_types):
yield _encode_int(value)
elif isinstance(value, float):
yield _floatstr(value)
elif _use_decimal and isinstance(value, Decimal):
yield str(value)
else:
for_json = _for_json and call_method(value, 'for_json')
if for_json:
chunks = _iterencode(for_json[0], _current_indent_level)
elif isinstance(value, list):
chunks = _iterencode_list(value, _current_indent_level)
else:
_asdict = _namedtuple_as_object and call_method(value, '_asdict')
if _asdict:
dct = _asdict[0]
if not isinstance(dct, dict):
raise TypeError("_asdict() must return a dict, not %s" % (type(dct).__name__,))
chunks = _iterencode_dict(dct,
_current_indent_level)
elif _tuple_as_array and isinstance(value, tuple):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (_indent * _current_indent_level)
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, string_types):
yield _encoder(o)
elif _PY3 and isinstance(o, bytes) and _encoding is not None:
yield _encoder(o)
elif isinstance(o, RawJSON):
yield o.encoded_json
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, integer_types):
yield _encode_int(o)
elif isinstance(o, float):
yield _floatstr(o)
else:
for_json = _for_json and call_method(o, 'for_json')
if for_json:
for chunk in _iterencode(for_json[0], _current_indent_level):
yield chunk
elif isinstance(o, list):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
else:
_asdict = _namedtuple_as_object and call_method(o, '_asdict')
if _asdict:
dct = _asdict[0]
if not isinstance(dct, dict):
raise TypeError("_asdict() must return a dict, not %s" % (type(dct).__name__,))
for chunk in _iterencode_dict(dct, _current_indent_level):
yield chunk
elif (_tuple_as_array and isinstance(o, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
elif _use_decimal and isinstance(o, Decimal):
yield str(o)
else:
while _iterable_as_array:
# Markers are not checked here because it is valid for
# an iterable to return self.
try:
o = iter(o)
except TypeError:
break
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
return
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| JSONEncoderForHTML |
python | pytorch__pytorch | torch/_dynamo/guards.py | {
"start": 36114,
"end": 36354
} | class ____(enum.Enum):
GUARD_MANAGER = 1
DICT_GUARD_MANAGER = 2
@functools.cache
def code_framelocals_names_reversed_cached(code: types.CodeType) -> list[str]:
return list(reversed(code_framelocals_names(code)))
| GuardManagerType |
python | walkccc__LeetCode | solutions/553. Optimal Division/553.py | {
"start": 0,
"end": 322
} | class ____:
def optimalDivision(self, nums: list[int]) -> str:
ans = str(nums[0])
if len(nums) == 1:
return ans
if len(nums) == 2:
return ans + '/' + str(nums[1])
ans += '/(' + str(nums[1])
for i in range(2, len(nums)):
ans += '/' + str(nums[i])
ans += ')'
return ans
| Solution |
python | walkccc__LeetCode | solutions/114. Flatten Binary Tree to Linked List/114-3.py | {
"start": 0,
"end": 464
} | class ____:
def flatten(self, root: TreeNode | None) -> None:
if not root:
return
while root:
if root.left:
# Find the rightmost root
rightmost = root.left
while rightmost.right:
rightmost = rightmost.right
# Rewire the connections
rightmost.right = root.right
root.right = root.left
root.left = None
# Move on to the right side of the tree
root = root.right
| Solution |
python | kamyu104__LeetCode-Solutions | Python/prime-pairs-with-target-sum.py | {
"start": 45,
"end": 882
} | class ____(object):
def findPrimePairs(self, n):
"""
:type n: int
:rtype: List[List[int]]
"""
def linear_sieve_of_eratosthenes(n):
primes = []
spf = [-1]*(n+1) # the smallest prime factor
for i in xrange(2, n+1):
if spf[i] == -1:
spf[i] = i
primes.append(i)
for p in primes:
if i*p > n or p > spf[i]:
break
spf[i*p] = p
return spf # len(primes) = O(n/(logn-1)), reference: https://math.stackexchange.com/questions/264544/how-to-find-number-of-prime-numbers-up-to-to-n
spf = linear_sieve_of_eratosthenes(n)
return [[i, n-i] for i in xrange(2, n//2+1) if spf[i] == i and spf[n-i] == n-i]
| Solution |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip.py | {
"start": 4647,
"end": 5697
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss from the text decoder.
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
loss: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
scores.
"""
)
| BlipTextVisionModelOutput |
python | nedbat__coveragepy | tests/test_api.py | {
"start": 45104,
"end": 50185
} | class ____(CoverageTest):
"""Tests of the relative_files setting."""
def test_moving_stuff(self) -> None:
# When using absolute file names, moving the source around results in
# "No source for code" errors while reporting.
self.make_file("foo.py", "a = 1")
cov = coverage.Coverage(source=["."])
self.start_import_stop(cov, "foo")
res = cov.report()
assert res == 100
expected = re.escape("No source for code: '{}'.".format(abs_file("foo.py")))
os.remove("foo.py")
self.make_file("new/foo.py", "a = 1")
shutil.move(".coverage", "new/.coverage")
with change_dir("new"):
cov = coverage.Coverage()
cov.load()
with pytest.raises(NoSource, match=expected):
cov.report()
def test_moving_stuff_with_relative(self) -> None:
# When using relative file names, moving the source around is fine.
self.make_file("foo.py", "a = 1")
self.make_file(
".coveragerc",
"""\
[run]
relative_files = true
""",
)
cov = coverage.Coverage(source=["."])
self.start_import_stop(cov, "foo")
res = cov.report()
assert res == 100
os.remove("foo.py")
self.make_file("new/foo.py", "a = 1")
shutil.move(".coverage", "new/.coverage")
shutil.move(".coveragerc", "new/.coveragerc")
with change_dir("new"):
cov = coverage.Coverage()
cov.load()
res = cov.report()
assert res == 100
def test_combine_relative(self) -> None:
self.make_file(
"foo.py",
"""\
import mod
a = 1
""",
)
self.make_file("lib/mod/__init__.py", "x = 1")
self.make_file(
".coveragerc",
"""\
[run]
relative_files = true
""",
)
sys.path.append("lib")
cov = coverage.Coverage(source=["."], data_suffix=True)
self.start_import_stop(cov, "foo")
cov.save()
self.make_file("dir2/bar.py", "a = 1")
self.make_file(
"dir2/.coveragerc",
"""\
[run]
relative_files = true
""",
)
with change_dir("dir2"):
cov = coverage.Coverage(source=["."], data_suffix=True)
self.start_import_stop(cov, "bar")
cov.save()
shutil.move(glob.glob(".coverage.*")[0], "..")
self.make_file("foo.py", "a = 1")
self.make_file("bar.py", "a = 1")
self.make_file("modsrc/__init__.py", "x = 1")
self.make_file(
".coveragerc",
"""\
[run]
relative_files = true
[paths]
source =
modsrc
*/mod
""",
)
cov = coverage.Coverage()
cov.combine()
cov.save()
cov = coverage.Coverage()
cov.load()
files = cov.get_data().measured_files()
assert files == {"foo.py", "bar.py", os_sep("modsrc/__init__.py")}
res = cov.report()
assert res == 100
def test_combine_no_suffix_multiprocessing(self) -> None:
self.make_file(
".coveragerc",
"""\
[run]
branch = True
""",
)
cov = coverage.Coverage(
config_file=".coveragerc",
concurrency="multiprocessing",
data_suffix=False,
)
cov.start()
cov.stop()
# The warning isn't the point of this test, but suppress it.
with pytest.warns(Warning) as warns:
cov.combine()
assert_coverage_warnings(warns, "No data was collected. (no-data-collected)")
cov.save()
self.assert_file_count(".coverage.*", 0)
self.assert_exists(".coverage")
def test_files_up_one_level(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1280
self.make_file(
"src/mycode.py",
"""\
def foo():
return 17
""",
)
self.make_file(
"test/test_it.py",
"""\
from src.mycode import foo
assert foo() == 17
""",
)
self.make_file(
"test/.coveragerc",
"""\
[run]
parallel = True
relative_files = True
[paths]
source =
../src/
*/src
""",
)
os.chdir("test")
sys.path.insert(0, "..")
cov1 = coverage.Coverage()
self.start_import_stop(cov1, "test_it")
cov1.save()
cov2 = coverage.Coverage()
cov2.combine()
cov3 = coverage.Coverage()
cov3.load()
report = self.get_report(cov3)
assert self.last_line_squeezed(report) == "TOTAL 4 0 100%"
| RelativePathTest |
python | readthedocs__readthedocs.org | readthedocs/organizations/models.py | {
"start": 11368,
"end": 13541
} | class ____(models.Model):
"""Model to keep track of invitations to an organization."""
# Auto fields
pub_date = models.DateTimeField(_("Publication date"), auto_now_add=True)
modified_date = models.DateTimeField(_("Modified date"), auto_now=True)
# Foreign
organization = models.ForeignKey(
Organization,
related_name="invites",
on_delete=models.CASCADE,
)
team = models.ForeignKey(
Team,
verbose_name=_("Team"),
related_name="invites",
on_delete=models.CASCADE,
)
email = models.EmailField(_("E-mail"))
hash = models.CharField(_("Hash"), max_length=250)
count = models.IntegerField(_("Count"), default=0)
total = models.IntegerField(_("Total"), default=10)
class Meta:
unique_together = ("team", "email")
def __str__(self):
return self.email
def save(self, *args, **kwargs):
hash_ = salted_hmac(
# HMAC key per applications
".".join([self.__module__, self.__class__.__name__]),
# HMAC message
"".join([str(self.team), str(self.email)]),
)
self.hash = hash_.hexdigest()[::2]
super().save(*args, **kwargs)
def migrate(self):
"""
Migrate this invite to our new invitations model.
New invitations require a from_user, old invitations don't
track this, so we default to the first owner of the organization.
The related TeamMember model will be deleted,
so the invitation isn't listed twice in the team members page.
"""
from readthedocs.invitations.models import Invitation
owner = self.organization.owners.first()
content_type = ContentType.objects.get_for_model(self.team)
invitation, created = Invitation.objects.get_or_create(
token=self.hash,
defaults={
"from_user": owner,
"to_email": self.email,
"content_type": content_type,
"object_id": self.team.pk,
},
)
self.teammember_set.all().delete()
return invitation, created
| TeamInvite |
python | bottlepy__bottle | test/test_mdict.py | {
"start": 58,
"end": 1975
} | class ____(unittest.TestCase):
def test_isadict(self):
""" MultiDict should behaves like a normal dict """
d, m = dict(a=5), MultiDict(a=5)
d['key'], m['key'] = 'value', 'value'
d['k2'], m['k2'] = 'v1', 'v1'
d['k2'], m['k2'] = 'v2', 'v2'
self.assertEqual(list(d.keys()), list(m.keys()))
self.assertEqual(list(d.values()), list(m.values()))
self.assertEqual(list(d.keys()), list(m.iterkeys()))
self.assertEqual(list(d.values()), list(m.itervalues()))
self.assertEqual(d.get('key'), m.get('key'))
self.assertEqual(d.get('cay'), m.get('cay'))
self.assertEqual(list(iter(d)), list(iter(m)))
self.assertEqual([k for k in d], [k for k in m])
self.assertEqual(len(d), len(m))
self.assertEqual('key' in d, 'key' in m)
self.assertEqual('cay' in d, 'cay' in m)
self.assertRaises(KeyError, lambda: m['cay'])
def test_ismulti(self):
""" MultiDict has some special features """
m = MultiDict(a=5)
m['a'] = 6
self.assertEqual([5, 6], m.getall('a'))
self.assertEqual([], m.getall('b'))
self.assertEqual([('a', 5), ('a', 6)], list(m.iterallitems()))
def test_isheader(self):
""" HeaderDict replaces by default and title()s its keys """
m = HeaderDict(abc_def=5)
m['abc_def'] = 6
self.assertEqual(['6'], m.getall('abc_def'))
m.append('abc_def', 7)
self.assertEqual(['6', '7'], m.getall('abc_def'))
self.assertEqual([('Abc-Def', '6'), ('Abc-Def', '7')], list(m.iterallitems()))
def test_headergetbug(self):
''' Assure HeaderDict.get() to be case insensitive '''
d = HeaderDict()
d['UPPER'] = 'UPPER'
d['lower'] = 'lower'
self.assertEqual(d.get('upper'), 'UPPER')
self.assertEqual(d.get('LOWER'), 'lower')
| TestMultiDict |
python | kamyu104__LeetCode-Solutions | Python/splitting-a-string-into-descending-consecutive-values.py | {
"start": 31,
"end": 636
} | class ____(object):
def splitString(self, s):
"""
:type s: str
:rtype: bool
"""
def backtracking(s, i, num, cnt):
if i == len(s):
return cnt >= 2
new_num = 0
for j in xrange(i, len(s)):
new_num = new_num*10 + int(s[j])
if new_num >= num >= 0:
break
if (num == -1 or num-1 == new_num) and backtracking(s, j+1, new_num, cnt+1):
return True
return False
return backtracking(s, 0, -1, 0)
| Solution |
python | lepture__authlib | tests/flask/test_oauth2/test_code_challenge.py | {
"start": 824,
"end": 8600
} | class ____(_CodeChallenge):
SUPPORTED_CODE_CHALLENGE_METHOD = ["plain", "S256", "S128"]
@pytest.fixture(autouse=True)
def server(server):
server.register_grant(AuthorizationCodeGrant, [CodeChallenge(required=True)])
return server
@pytest.fixture(autouse=True)
def client(client, db):
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "none",
"response_types": ["code"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
return client
def test_missing_code_challenge(test_client):
rv = test_client.get(authorize_url + "&code_challenge_method=plain")
assert "Missing" in rv.location
def test_has_code_challenge(test_client):
rv = test_client.get(
authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
)
assert rv.data == b"ok"
def test_invalid_code_challenge(test_client):
rv = test_client.get(
authorize_url + "&code_challenge=abc&code_challenge_method=plain"
)
assert "Invalid" in rv.location
def test_invalid_code_challenge_method(test_client):
suffix = "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s&code_challenge_method=invalid"
rv = test_client.get(authorize_url + suffix)
assert "Unsupported" in rv.location
def test_supported_code_challenge_method(test_client):
suffix = "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s&code_challenge_method=plain"
rv = test_client.get(authorize_url + suffix)
assert rv.data == b"ok"
def test_trusted_client_without_code_challenge(test_client, db, client):
client.client_secret = "client-secret"
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "client_secret_basic",
"response_types": ["code"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
rv = test_client.get(authorize_url)
assert rv.data == b"ok"
rv = test_client.post(authorize_url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
headers = create_basic_header("client-id", "client-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
},
headers=headers,
)
resp = json.loads(rv.data)
assert "access_token" in resp
def test_missing_code_verifier(test_client):
url = authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "Missing" in resp["error_description"]
def test_trusted_client_missing_code_verifier(test_client, db, client):
client.client_secret = "client-secret"
client.set_client_metadata(
{
"redirect_uris": ["https://client.test"],
"scope": "profile address",
"token_endpoint_auth_method": "client_secret_basic",
"response_types": ["code"],
"grant_types": ["authorization_code"],
}
)
db.session.add(client)
db.session.commit()
url = authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
headers = create_basic_header("client-id", "client-secret")
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
},
headers=headers,
)
resp = json.loads(rv.data)
assert "Missing" in resp["error_description"]
def test_plain_code_challenge_invalid(test_client):
url = authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"code_verifier": "bar",
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "Invalid" in resp["error_description"]
def test_plain_code_challenge_failed(test_client):
url = authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"code_verifier": generate_token(48),
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "failed" in resp["error_description"]
def test_plain_code_challenge_success(test_client):
code_verifier = generate_token(48)
url = authorize_url + "&code_challenge=" + code_verifier
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"code_verifier": code_verifier,
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "access_token" in resp
def test_s256_code_challenge_success(test_client):
code_verifier = generate_token(48)
code_challenge = create_s256_code_challenge(code_verifier)
url = authorize_url + "&code_challenge=" + code_challenge
url += "&code_challenge_method=S256"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
rv = test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"code_verifier": code_verifier,
"client_id": "client-id",
},
)
resp = json.loads(rv.data)
assert "access_token" in resp
def test_not_implemented_code_challenge_method(test_client):
url = authorize_url + "&code_challenge=Zhs2POMonIVVHZteWfoU7cSXQSm0YjghikFGJSDI2_s"
url += "&code_challenge_method=S128"
rv = test_client.post(url, data={"user_id": "1"})
assert "code=" in rv.location
params = dict(url_decode(urlparse.urlparse(rv.location).query))
code = params["code"]
with pytest.raises(RuntimeError):
test_client.post(
"/oauth/token",
data={
"grant_type": "authorization_code",
"code": code,
"code_verifier": generate_token(48),
"client_id": "client-id",
},
)
| CodeChallenge |
python | pytorch__pytorch | test/inductor/test_flex_attention.py | {
"start": 234712,
"end": 235705
} | class ____:
batch_size: int
num_heads: int
seq_length: int
head_dim: int
dtype: torch.dtype
config_str: Optional[str] = None
def __str__(self):
return f"batch:{self.batch_size}_head:{self.num_heads}_seq_len:{self.seq_length}_headdim:{self.head_dim}_dtype:{str(self.dtype).split('.')[-1]}"
def get_params(dtypes: list[torch.dtype]) -> list[Params]:
params = []
seq_lengths = [37, 256, 277]
for seq_len, dtype in product(seq_lengths, dtypes):
params.append(
Params(
batch_size=2, num_heads=4, seq_length=seq_len, head_dim=16, dtype=dtype
)
)
return params
supports_learnable_bias = unittest.skipUnless(
(
(torch.cuda.is_available() and has_triton())
and (torch.cuda.get_device_capability() >= (8, 0) or torch.version.hip)
),
"Requires Triton + A100 or Triton + ROCm",
)
@supports_learnable_bias
@large_tensor_test_class("2GB", device=test_device[0])
| Params |
python | apache__airflow | devel-common/src/tests_common/pytest_plugin.py | {
"start": 63041,
"end": 64646
} | class ____(Protocol):
"""Type stub for create_task_instance_of_operator and create_serialized_task_instance_of_operator."""
def __call__(
self,
operator_class: type[BaseOperator],
*,
dag_id: str,
logical_date: datetime = ...,
session: Session = ...,
**kwargs,
) -> TaskInstance: ...
@pytest.fixture
def create_serialized_task_instance_of_operator(dag_maker: DagMaker) -> CreateTaskInstanceOfOperator:
from tests_common.test_utils.version_compat import NOTSET
def _create_task_instance(
operator_class,
*,
dag_id,
logical_date=NOTSET,
session=None,
**operator_kwargs,
) -> TaskInstance:
with dag_maker(dag_id=dag_id, serialized=True, session=session):
operator_class(**operator_kwargs)
(ti,) = dag_maker.create_dagrun(logical_date=logical_date).task_instances
return ti
return _create_task_instance
@pytest.fixture
def create_task_instance_of_operator(dag_maker: DagMaker) -> CreateTaskInstanceOfOperator:
from tests_common.test_utils.version_compat import NOTSET
def _create_task_instance(
operator_class,
*,
dag_id,
logical_date=NOTSET,
session=None,
**operator_kwargs,
) -> TaskInstance:
with dag_maker(dag_id=dag_id, session=session, serialized=True):
operator_class(**operator_kwargs)
(ti,) = dag_maker.create_dagrun(logical_date=logical_date).task_instances
return ti
return _create_task_instance
| CreateTaskInstanceOfOperator |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_templatetags.py | {
"start": 11793,
"end": 13125
} | class ____(ThumbnailerBase):
def test_check_generate(self):
src = (
'{% with t=filename|thumbnailer_passive %}'
'{{ t.generate }}{% endwith %}'
)
output = self.render_template(src)
self.assertEqual(output, 'False')
def test_get_existing(self):
options = settings.THUMBNAIL_ALIASES['']['small']
# Pregenerate the thumbnail.
get_thumbnailer(self.storage, self.filename).get_thumbnail(options)
src = (
'{% with t=filename|thumbnailer_passive %}'
'{{ t.small.url }}{% endwith %}'
)
output = self.render_template(src)
expected = self.verify_thumbnail((20, 20), options)
expected_url = ''.join((settings.MEDIA_URL, expected))
self.assertEqual(output, expected_url)
def test_get_missing(self):
src = (
'{% with t=filename|thumbnailer_passive %}'
'{{ t.small.url }}{% endwith %}'
)
output = self.render_template(src)
self.assertEqual(output, '')
def test_invalid(self):
src = (
'{% with t=invalid_filename|thumbnailer_passive %}'
'{{ t.small.url }}{% endwith %}'
)
output = self.render_template(src)
self.assertEqual(output, '')
| ThumbnailerPassiveFilterTest |
python | gevent__gevent | src/greentest/3.13/test_subprocess.py | {
"start": 71300,
"end": 80433
} | class ____(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python(
"import time; time.sleep(3600)",
timeout=0.1, stdout=subprocess.PIPE)
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments that exit fast
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
@unittest.skipUnless(mswindows, "Maybe test trigger a leak on Ubuntu")
def test_run_with_an_empty_env(self):
# gh-105436: fix subprocess.run(..., env={}) broken on Windows
args = [sys.executable, "-c", 'pass']
# Ignore subprocess errors - we only care that the API doesn't
# raise an OSError
subprocess.run(args, env={})
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_stdout(self):
# run() refuses to accept stdout=STDOUT
with self.assertRaises(ValueError,
msg=("STDOUT can only be used for stderr")):
self.run_python("print('will not be run')",
stdout=subprocess.STDOUT)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines
# as it depends on the timing with wide enough margins for normal situations
# but does assert that it happened "soon enough" to believe the right thing
# happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def test_encoding_warning(self):
code = textwrap.dedent("""\
from subprocess import *
run("echo hello", shell=True, text=True)
check_output("echo hello", shell=True, text=True)
""")
cp = subprocess.run([sys.executable, "-Xwarn_default_encoding", "-c", code],
capture_output=True)
lines = cp.stderr.splitlines()
self.assertEqual(len(lines), 2, lines)
self.assertTrue(lines[0].startswith(b"<string>:2: EncodingWarning: "))
self.assertTrue(lines[1].startswith(b"<string>:3: EncodingWarning: "))
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
| RunFuncTestCase |
python | PrefectHQ__prefect | src/prefect/events/schemas/deployment_triggers.py | {
"start": 970,
"end": 2375
} | class ____(PrefectBaseModel, abc.ABC, extra="ignore"): # type: ignore[call-arg]
"""
Base class describing a set of criteria that must be satisfied in order to trigger
an automation.
"""
# Fields from Automation
name: Optional[str] = Field(
default=None,
description="The name to give to the automation created for this trigger.",
)
description: str = Field(
default="", description="A longer description of this automation"
)
enabled: bool = Field(
default=True, description="Whether this automation will be evaluated"
)
# Fields from the RunDeployment action
parameters: Optional[Dict[str, Any]] = Field(
default=None,
description=(
"The parameters to pass to the deployment, or None to use the "
"deployment's default parameters"
),
)
job_variables: Optional[Dict[str, Any]] = Field(
default=None,
description=(
"Job variables to pass to the deployment, or None to use the "
"deployment's default job variables"
),
)
schedule_after: NonNegativeTimeDelta = Field(
default_factory=lambda: timedelta(0),
description=(
"The amount of time to wait before running the deployment. "
"Defaults to running the deployment immediately."
),
)
| BaseDeploymentTrigger |
python | tiangolo__fastapi | docs_src/query_param_models/tutorial002_pv1_an.py | {
"start": 166,
"end": 522
} | class ____(BaseModel):
class Config:
extra = "forbid"
limit: int = Field(100, gt=0, le=100)
offset: int = Field(0, ge=0)
order_by: Literal["created_at", "updated_at"] = "created_at"
tags: List[str] = []
@app.get("/items/")
async def read_items(filter_query: Annotated[FilterParams, Query()]):
return filter_query
| FilterParams |
python | gevent__gevent | src/gevent/tests/test__pool.py | {
"start": 15851,
"end": 16054
} | class ____(greentest.TestCase):
switch_expected = False
def test(self):
p = gevent.pool.Pool()
res = p.join()
self.assertTrue(res, "empty should return true")
| TestJoinEmpty |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/mapping/static.py | {
"start": 1033,
"end": 7755
} | class ____(
PartitionMapping,
NamedTuple(
"_StaticPartitionMapping",
[
(
"downstream_partition_keys_by_upstream_partition_key",
PublicAttr[Mapping[str, Union[str, Collection[str]]]],
)
],
),
):
"""Define an explicit correspondence between two StaticPartitionsDefinitions.
Args:
downstream_partition_keys_by_upstream_partition_key (Dict[str, str | Collection[str]]):
The single or multi-valued correspondence from upstream keys to downstream keys.
"""
def __init__(
self,
downstream_partition_keys_by_upstream_partition_key: Mapping[
str, Union[str, Collection[str]]
],
):
check.mapping_param(
downstream_partition_keys_by_upstream_partition_key,
"downstream_partition_keys_by_upstream_partition_key",
key_type=str,
value_type=(str, collections.abc.Collection),
)
# cache forward and reverse mappings
self._mapping = defaultdict(set)
for (
upstream_key,
downstream_keys,
) in downstream_partition_keys_by_upstream_partition_key.items():
self._mapping[upstream_key] = (
{downstream_keys} if isinstance(downstream_keys, str) else set(downstream_keys)
)
self._inverse_mapping = defaultdict(set)
for upstream_key, downstream_keys in self._mapping.items():
for downstream_key in downstream_keys:
self._inverse_mapping[downstream_key].add(upstream_key)
def validate_partition_mapping(
self,
upstream_partitions_def: PartitionsDefinition,
downstream_partitions_def: Optional[PartitionsDefinition],
):
if not isinstance(upstream_partitions_def, StaticPartitionsDefinition):
raise DagsterInvalidDefinitionError(
"Upstream partitions definition must be a StaticPartitionsDefinition"
)
if not isinstance(downstream_partitions_def, StaticPartitionsDefinition):
raise DagsterInvalidDefinitionError(
"Downstream partitions definition must be a StaticPartitionsDefinition",
)
self._check_upstream(
upstream_partitions_def=cast("StaticPartitionsDefinition", upstream_partitions_def)
)
self._check_downstream(
downstream_partitions_def=cast("StaticPartitionsDefinition", downstream_partitions_def)
)
@cached_method
def _check_upstream(self, *, upstream_partitions_def: StaticPartitionsDefinition):
"""Validate that the mapping from upstream to downstream is only defined on upstream keys."""
check.inst_param(
upstream_partitions_def,
"upstream_partitions_def",
StaticPartitionsDefinition,
"StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",
)
upstream_keys = upstream_partitions_def.get_partition_keys()
extra_keys = set(self._mapping.keys()).difference(upstream_keys)
if extra_keys:
raise ValueError(
f"mapping source partitions not in the upstream partitions definition: {extra_keys}"
)
@cached_method
def _check_downstream(self, *, downstream_partitions_def: StaticPartitionsDefinition):
"""Validate that the mapping from upstream to downstream only maps to downstream keys."""
check.inst_param(
downstream_partitions_def,
"downstream_partitions_def",
StaticPartitionsDefinition,
"StaticPartitionMapping can only be defined between two StaticPartitionsDefinitions",
)
downstream_keys = downstream_partitions_def.get_partition_keys()
extra_keys = set(self._inverse_mapping.keys()).difference(downstream_keys)
if extra_keys:
raise ValueError(
"mapping target partitions not in the downstream partitions definition:"
f" {extra_keys}"
)
def get_downstream_partitions_for_partitions( # pyright: ignore[reportIncompatibleMethodOverride]
self,
upstream_partitions_subset: PartitionsSubset,
upstream_partitions_def: StaticPartitionsDefinition,
downstream_partitions_def: StaticPartitionsDefinition,
current_time: Optional[datetime] = None,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> PartitionsSubset:
with partition_loading_context(current_time, dynamic_partitions_store):
self._check_downstream(downstream_partitions_def=downstream_partitions_def)
downstream_subset = downstream_partitions_def.empty_subset()
downstream_keys = set()
for key in upstream_partitions_subset.get_partition_keys():
downstream_keys.update(self._mapping[key])
return downstream_subset.with_partition_keys(downstream_keys)
def get_upstream_mapped_partitions_result_for_partitions( # pyright: ignore[reportIncompatibleMethodOverride]
self,
downstream_partitions_subset: Optional[PartitionsSubset],
downstream_partitions_def: Optional[PartitionsDefinition],
upstream_partitions_def: StaticPartitionsDefinition,
current_time: Optional[datetime] = None,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> UpstreamPartitionsResult:
with partition_loading_context(current_time, dynamic_partitions_store):
self._check_upstream(upstream_partitions_def=upstream_partitions_def)
upstream_subset = upstream_partitions_def.empty_subset()
if downstream_partitions_subset is None:
return UpstreamPartitionsResult(
partitions_subset=upstream_subset,
required_but_nonexistent_subset=upstream_partitions_def.empty_subset(),
)
upstream_keys = set()
for key in downstream_partitions_subset.get_partition_keys():
upstream_keys.update(self._inverse_mapping[key])
return UpstreamPartitionsResult(
partitions_subset=upstream_subset.with_partition_keys(upstream_keys),
required_but_nonexistent_subset=upstream_partitions_def.empty_subset(),
)
@property
def description(self) -> str:
return (
f"Maps upstream partitions to their downstream dependencies according to the "
f"following mapping: \n{self.downstream_partition_keys_by_upstream_partition_key}"
)
| StaticPartitionMapping |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 19780,
"end": 20183
} | class ____(TextAlignment):
def _default_values(self, n: int) -> list:
vals = itertools.cycle(["top", "bottom"])
return [next(vals) for _ in range(n)]
# =================================================================================== #
# Properties with RGB(A) color values
# =================================================================================== #
| VerticalAlignment |
python | pypa__warehouse | warehouse/integrations/vulnerabilities/osv/__init__.py | {
"start": 483,
"end": 3440
} | class ____(vulnerabilities.VulnerabilityVerifier):
def __init__(
self,
session,
metrics,
public_keys_api_url: str = OSV_PUBLIC_KEYS_URL,
public_keys_cache=DEFAULT_PUBLIC_KEYS_CACHE,
):
super().__init__(
metrics=metrics, source="osv", public_keys_cache=public_keys_cache
)
self._session = session
self._metrics = metrics
self._public_key_url = public_keys_api_url
def retrieve_public_key_payload(self):
try:
response = self._session.get(self._public_key_url)
response.raise_for_status()
return response.json()
except requests.HTTPError as exc:
raise OSVPublicKeyAPIError(
f"Invalid response code {response.status_code}: {response.text[:100]}",
f"public_key_api.status.{response.status_code}",
) from exc
except json.JSONDecodeError as exc:
raise OSVPublicKeyAPIError(
f"Non-JSON response received: {response.text[:100]}",
"public_key_api.invalid_json",
) from exc
except requests.RequestException as exc:
raise OSVPublicKeyAPIError(
"Could not connect to OSV", "public_key_api.network_error"
) from exc
def extract_public_keys(self, pubkey_api_data):
if not isinstance(pubkey_api_data, dict):
raise OSVPublicKeyAPIError(
f"Payload is not a dict but: {str(pubkey_api_data)[:100]}",
"public_key_api.format_error",
)
try:
public_keys = pubkey_api_data["public_keys"]
except KeyError:
raise OSVPublicKeyAPIError(
"Payload misses 'public_keys' attribute", "public_key_api.format_error"
)
if not isinstance(public_keys, list):
raise OSVPublicKeyAPIError(
"Payload 'public_keys' attribute is not a list",
"public_key_api.format_error",
)
expected_attributes = {"key", "key_identifier"}
result = []
for public_key in public_keys:
if not isinstance(public_key, dict):
raise OSVPublicKeyAPIError(
f"Key is not a dict but: {public_key}",
"public_key_api.format_error",
)
attributes = set(public_key)
if not expected_attributes <= attributes:
raise OSVPublicKeyAPIError(
"Missing attribute in key: "
f"{sorted(expected_attributes - attributes)}",
"public_key_api.format_error",
)
result.append(
{"key": public_key["key"], "key_id": public_key["key_identifier"]}
)
self._public_keys_cache.set(now=time.time(), value=result)
return result
| VulnerabilityReportVerifier |
python | huggingface__transformers | src/transformers/models/hiera/modeling_hiera.py | {
"start": 35252,
"end": 35913
} | class ____(nn.Module):
def __init__(self, config: HieraConfig):
super().__init__()
num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1))
self.layernorm = nn.LayerNorm(num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = hidden_states.transpose(1, 2)
pooled_output = self.pooler(hidden_states)
pooled_output = torch.flatten(pooled_output, 1)
pooled_output = self.layernorm(pooled_output)
return pooled_output
@auto_docstring
| HieraPooler |
python | kamyu104__LeetCode-Solutions | Python/image-smoother.py | {
"start": 33,
"end": 762
} | class ____(object):
def imageSmoother(self, M):
"""
:type M: List[List[int]]
:rtype: List[List[int]]
"""
def getGray(M, i, j):
total, count = 0, 0.0
for r in xrange(-1, 2):
for c in xrange(-1, 2):
ii, jj = i + r, j + c
if 0 <= ii < len(M) and 0 <= jj < len(M[0]):
total += M[ii][jj]
count += 1.0
return int(total / count)
result = [[0 for _ in xrange(len(M[0]))] for _ in xrange(len(M))]
for i in xrange(len(M)):
for j in xrange(len(M[0])):
result[i][j] = getGray(M, i, j)
return result
| Solution |
python | django__django | tests/admin_filters/tests.py | {
"start": 83510,
"end": 83791
} | class ____(SimpleTestCase):
def test_get_facet_counts(self):
msg = "subclasses of FacetsMixin must provide a get_facet_counts() method."
with self.assertRaisesMessage(NotImplementedError, msg):
FacetsMixin().get_facet_counts(None, None)
| FacetsMixinTests |
python | keras-team__keras | keras/src/ops/numpy_test.py | {
"start": 18309,
"end": 37227
} | class ____(testing.TestCase):
def test_add(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.add(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.add(x, y)
def test_heaviside(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((3,))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
def test_hypot(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.hypot(x, y).shape, (2, 3))
def test_subtract(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.subtract(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.subtract(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.subtract(x, y)
def test_multiply(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.multiply(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.multiply(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.multiply(x, y)
def test_matmul(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 2))
self.assertEqual(knp.matmul(x, y).shape, (2, 2))
with self.assertRaises(ValueError):
x = KerasTensor((3, 4))
y = KerasTensor((2, 3, 4))
knp.matmul(x, y)
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
def test_matmul_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2))
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3))
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
self.assertTrue(result.sparse)
def test_power(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.power(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.power(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.power(x, y)
def test_divide(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.divide(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.divide(x, y)
def test_divide_no_nan(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.divide_no_nan(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.divide_no_nan(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.divide_no_nan(x, y)
def test_true_divide(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.true_divide(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.true_divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.true_divide(x, y)
def test_append(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.append(x, y).shape, (12,))
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.append(x, y, axis=0).shape, (4, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.append(x, y, axis=2)
def test_arctan2(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.arctan2(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.arctan2(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.arctan2(x, y)
def test_bitwise_and(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_and(x, y).shape, (2, 3))
def test_bitwise_or(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_or(x, y).shape, (2, 3))
def test_bitwise_xor(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_xor(x, y).shape, (2, 3))
def test_bitwise_left_shift(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_left_shift(x, y).shape, (2, 3))
# left_shift is same as bitwise_left_shift
def test_bitwise_right_shift(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_right_shift(x, y).shape, (2, 3))
# right_shift is same as bitwise_right_shift
def test_cross(self):
x1 = KerasTensor((2, 3, 3))
x2 = KerasTensor((1, 3, 2))
y1 = KerasTensor((2, 3, 3))
y2 = KerasTensor((2, 3, 2))
self.assertEqual(knp.cross(x1, y1).shape, (2, 3, 3))
self.assertEqual(knp.cross(x2, y2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.cross(x, y)
with self.assertRaises(ValueError):
x = KerasTensor((4, 3, 3))
y = KerasTensor((2, 3, 3))
knp.cross(x, y)
def test_einsum(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 4))
self.assertEqual(knp.einsum("ij,jk->ik", x, y).shape, (2, 4))
self.assertEqual(knp.einsum("ij,jk->ikj", x, y).shape, (2, 4, 3))
self.assertEqual(knp.einsum("ii", x).shape, ())
self.assertEqual(knp.einsum(",ij", 5, x).shape, (2, 3))
x = KerasTensor((2, 3, 4))
y = KerasTensor((3, 4, 5))
z = KerasTensor((1, 1, 1, 9))
self.assertEqual(knp.einsum("ijk,jkl->li", x, y).shape, (5, 2))
self.assertEqual(knp.einsum("ijk,jkl->lij", x, y).shape, (5, 2, 3))
self.assertEqual(knp.einsum("...,...j->...j", x, y).shape, (2, 3, 4, 5))
self.assertEqual(
knp.einsum("i...,...j->i...j", x, y).shape, (2, 3, 4, 5)
)
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(
knp.einsum("i...,...j,...k", x, y, z).shape, (1, 3, 4, 2, 5, 9)
)
self.assertEqual(
knp.einsum("mij,ijk,...", x, y, z).shape, (1, 1, 1, 9, 5, 2)
)
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((3, 4))
knp.einsum("ijk,jk->ik", x, y)
def test_full_like(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.full_like(x, 2).shape, (2, 3))
def test_gcd(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.gcd(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.gcd(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.gcd(x, y)
def test_greater(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.greater(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.greater(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.greater(x, y)
def test_greater_equal(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.greater_equal(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.greater_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.greater_equal(x, y)
def test_isclose(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.isclose(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.isclose(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.isclose(x, y)
def test_isin(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 3))
self.assertEqual(knp.isin(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.isin(x, 2).shape, (2, 3))
def test_kron(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.kron(x, y).shape, (4, 9))
def test_lcm(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.lcm(x, y).shape, (2, 3))
def test_less(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.less(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.less(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.less(x, y)
def test_less_equal(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.less_equal(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.less_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.less_equal(x, y)
def test_linspace(self):
start = KerasTensor((2, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(knp.linspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor((2, 3))
stop = KerasTensor((2, 3, 4))
knp.linspace(start, stop)
def test_logical_and(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_and(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.logical_and(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.logical_and(x, y)
def test_logical_or(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_or(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.logical_or(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.logical_or(x, y)
def test_logspace(self):
start = KerasTensor((2, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(knp.logspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor((2, 3))
stop = KerasTensor((2, 3, 4))
knp.logspace(start, stop)
def test_maximum(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.maximum(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.maximum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.maximum(x, y)
def test_minimum(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.minimum(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.minimum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.minimum(x, y)
def test_mod(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.mod(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.mod(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.mod(x, y)
def test_not_equal(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.not_equal(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.not_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.not_equal(x, y)
def test_outer(self):
x = KerasTensor((3,))
y = KerasTensor((4,))
self.assertEqual(knp.outer(x, y).shape, (3, 4))
x = KerasTensor((2, 3))
y = KerasTensor((4, 5))
self.assertEqual(knp.outer(x, y).shape, (6, 20))
x = KerasTensor((2, 3))
self.assertEqual(knp.outer(x, 2).shape, (6, 1))
def test_quantile(self):
x = KerasTensor((3, 3))
# q as scalar
q = KerasTensor(())
self.assertEqual(knp.quantile(x, q).shape, ())
# q as 1D tensor
q = KerasTensor((2,))
self.assertEqual(knp.quantile(x, q).shape, (2,))
self.assertEqual(knp.quantile(x, q, axis=1).shape, (2, 3))
self.assertEqual(
knp.quantile(x, q, axis=1, keepdims=True).shape,
(2, 3, 1),
)
def test_searchsorted(self):
a = KerasTensor((3,))
v = KerasTensor((2, 3))
self.assertEqual(knp.searchsorted(a, v).shape, v.shape)
def test_take(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.take(x, 1).shape, ())
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (2, 2, 2))
# test with multi-dimensional indices
x = KerasTensor((2, 3, 4, 5))
indices = KerasTensor((6, 7))
self.assertEqual(knp.take(x, indices, axis=2).shape, (2, 3, 6, 7, 5))
def test_take_along_axis(self):
x = KerasTensor((2, 3))
indices = KerasTensor((1, 3))
self.assertEqual(knp.take_along_axis(x, indices, axis=0).shape, (1, 3))
self.assertEqual(knp.take_along_axis(x, indices, axis=1).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
indices = KerasTensor((1, 4))
knp.take_along_axis(x, indices, axis=0)
def test_tensordot(self):
x = KerasTensor((2, 3, 3))
y = KerasTensor((3, 3, 4))
self.assertEqual(knp.tensordot(x, y, axes=1).shape, (2, 3, 3, 4))
self.assertEqual(knp.tensordot(x, y, axes=2).shape, (2, 4))
self.assertEqual(
knp.tensordot(x, y, axes=[[1, 2], [0, 1]]).shape, (2, 4)
)
def test_vdot(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.vdot(x, y).shape, ())
def test_inner(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.inner(x, y).shape, ())
def test_where(self):
condition = KerasTensor((2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.where(condition, x, y).shape, (2, 3))
self.assertAllEqual(knp.where(condition).shape, (2, 3))
def test_floor_divide(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.floor_divide(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.floor_divide(x, y)
def test_xor(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_xor(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.logical_xor(x, y)
def test_digitize(self):
x = KerasTensor((2, 3))
bins = KerasTensor((3,))
self.assertEqual(knp.digitize(x, bins).shape, (2, 3))
self.assertTrue(knp.digitize(x, bins).dtype == "int32")
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
bins = KerasTensor((2, 3, 4))
knp.digitize(x, bins)
def test_correlate_mode_valid(self):
x = KerasTensor((3,))
y = KerasTensor((3,))
self.assertEqual(knp.correlate(x, y).shape, (1,))
self.assertTrue(knp.correlate(x, y).dtype == "float32")
with self.assertRaises(ValueError):
x = KerasTensor((3,))
y = KerasTensor((3, 4))
knp.correlate(x, y)
def test_correlate_mode_same(self):
x = KerasTensor((3,))
y = KerasTensor((3,))
self.assertEqual(knp.correlate(x, y, mode="same").shape, (3,))
self.assertTrue(knp.correlate(x, y, mode="same").dtype == "float32")
with self.assertRaises(ValueError):
x = KerasTensor((3,))
y = KerasTensor((3, 4))
knp.correlate(x, y, mode="same")
def test_correlate_mode_full(self):
x = KerasTensor((3,))
y = KerasTensor((3,))
self.assertEqual(knp.correlate(x, y, mode="full").shape, (5,))
self.assertTrue(knp.correlate(x, y, mode="full").dtype == "float32")
with self.assertRaises(ValueError):
x = KerasTensor((3))
y = KerasTensor((3, 4))
knp.correlate(x, y, mode="full")
| NumpyTwoInputOpsStaticShapeTest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_usage.py | {
"start": 321,
"end": 1088
} | class ____(BaseModel):
cache_creation: Optional[BetaCacheCreation] = None
"""Breakdown of cached tokens by TTL"""
cache_creation_input_tokens: Optional[int] = None
"""The number of input tokens used to create the cache entry."""
cache_read_input_tokens: Optional[int] = None
"""The number of input tokens read from the cache."""
input_tokens: int
"""The number of input tokens which were used."""
output_tokens: int
"""The number of output tokens which were used."""
server_tool_use: Optional[BetaServerToolUsage] = None
"""The number of server tool requests."""
service_tier: Optional[Literal["standard", "priority", "batch"]] = None
"""If the request used the priority, standard, or batch tier."""
| BetaUsage |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 24702,
"end": 26086
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
labels = 1 if len(y.shape) == 1 else y.shape[1]
entropies = []
for i in range(labels):
occurence_dict = defaultdict(float)
for value in y if labels == 1 else y[:, i]:
occurence_dict[value] += 1
entropies.append(
scipy.stats.entropy(
[occurence_dict[key] for key in occurence_dict], base=2
)
)
return np.mean(entropies)
"""
#@metafeatures.define("normalized_class_entropy")
#@metafeatures.define("attribute_entropy")
#@metafeatures.define("normalized_attribute_entropy")
#@metafeatures.define("joint_entropy")
#@metafeatures.define("mutual_information")
#@metafeatures.define("noise-signal-ratio")
#@metafeatures.define("signal-noise-ratio")
#@metafeatures.define("equivalent_number_of_attributes")
#@metafeatures.define("conditional_entropy")
#@metafeatures.define("average_attribute_entropy")
"""
################################################################################
# Landmarking features, computed with cross validation
# These should be invoked with the same transformations of X and y with which
# sklearn will be called later on
# from Pfahringer 2000
# Linear discriminant learner
@metafeatures.define("LandmarkLDA")
| ClassEntropy |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.