language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | celery__celery | celery/contrib/testing/app.py | {
"start": 555,
"end": 1057
} | class ____:
"""Trap that pretends to be an app but raises an exception instead.
This to protect from code that does not properly pass app instances,
then falls back to the current_app.
"""
def __getattr__(self, name):
# Workaround to allow unittest.mock to patch this object
# in Python 3.8 and above.
if name == '_is_coroutine' or name == '__func__':
return None
print(name)
raise RuntimeError('Test depends on current_app')
| Trap |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_compat.py | {
"start": 1395,
"end": 1701
} | class ____:
x: "Foo" = None # type: ignore
Foo.__signature__ = signature(Foo).replace( # type: ignore
parameters=[
Parameter(
"x",
Parameter.POSITIONAL_OR_KEYWORD,
annotation=ForwardRef("Foo"),
default=None,
)
]
)
@dataclass
| Foo |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 15648,
"end": 16897
} | class ____(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
usesLocals = False
alwaysUsed = {'__tracebackhide__', '__traceback_info__',
'__traceback_supplement__', '__debuggerskip__'}
def __init__(self):
super().__init__()
# Simplify: manage the special locals as globals
self.globals = self.alwaysUsed.copy()
# {name: node}
self.indirect_assignments = {}
def unused_assignments(self):
"""
Return a generator for the assignments which have not been used.
"""
for name, binding in self.items():
if (not binding.used and
name != '_' and # see issue #202
name not in self.globals and
not self.usesLocals and
isinstance(binding, Assignment)):
yield name, binding
def unused_annotations(self):
"""
Return a generator for the annotations which have not been used.
"""
for name, binding in self.items():
if not binding.used and isinstance(binding, Annotation):
yield name, binding
| FunctionScope |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 47090,
"end": 47298
} | class ____(BaseModel): # noqa: N801
"""foo."""
x: int = Field(..., description="abc")
y: Annotated[str, "foobar comment", InjectedToolArg()] = Field(
..., description="123"
)
| fooSchema |
python | langchain-ai__langchain | libs/core/langchain_core/messages/content.py | {
"start": 12023,
"end": 12619
} | class ____(TypedDict):
"""Tool call that is executed server-side.
For example: code execution, web search, etc.
"""
type: Literal["server_tool_call"]
"""Used for discrimination."""
id: str
"""An identifier associated with the tool call."""
name: str
"""The name of the tool to be called."""
args: dict[str, Any]
"""The arguments to the tool call."""
index: NotRequired[int | str]
"""Index of block in aggregate response. Used during streaming."""
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
| ServerToolCall |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_model.py | {
"start": 558,
"end": 692
} | class ____:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
| BasicModel |
python | facebookresearch__faiss | tests/test_contrib.py | {
"start": 21352,
"end": 22768
} | class ____(unittest.TestCase):
def test_sort(self):
""" make sure that the search results do not change
after sorting the inverted lists """
ds = datasets.SyntheticDataset(32, 2000, 200, 20)
index = faiss.index_factory(ds.d, "IVF50,SQ8")
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 5
Dref, Iref = index.search(ds.get_queries(), 5)
ivf_tools.sort_invlists_by_size(index)
list_sizes = ivf_tools.get_invlist_sizes(index.invlists)
assert np.all(list_sizes[1:] >= list_sizes[:-1])
Dnew, Inew = index.search(ds.get_queries(), 5)
np.testing.assert_equal(Dnew, Dref)
np.testing.assert_equal(Inew, Iref)
def test_hnsw_permute(self):
"""
make sure HNSW permutation works
(useful when used as coarse quantizer)
"""
ds = datasets.SyntheticDataset(32, 0, 1000, 50)
index = faiss.index_factory(ds.d, "HNSW32,Flat")
index.add(ds.get_database())
Dref, Iref = index.search(ds.get_queries(), 5)
rs = np.random.RandomState(1234)
perm = rs.permutation(index.ntotal)
index.permute_entries(perm)
Dnew, Inew = index.search(ds.get_queries(), 5)
np.testing.assert_equal(Dnew, Dref)
Inew_remap = perm[Inew]
np.testing.assert_equal(Inew_remap, Iref)
| TestInvlistSort |
python | Delgan__loguru | tests/conftest.py | {
"start": 4209,
"end": 4289
} | class ____(StubStream):
def isatty(self):
return True
| StreamIsattyTrue |
python | mlflow__mlflow | mlflow/genai/evaluation/constant.py | {
"start": 767,
"end": 1047
} | class ____:
REQUEST_ID = "request_id"
INPUTS = "inputs"
REQUEST = "request"
RESPONSE = "response"
OUTPUTS = "outputs"
EXPECTATIONS = "expectations"
TAGS = "tags"
TRACE = "trace"
SOURCE = "source"
# Result Dataframe column names
| InputDatasetColumn |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {
"start": 16402,
"end": 63164
} | class ____(
NamedTuple(
"_DagsterEvent",
[
("event_type_value", str),
("job_name", str),
("step_handle", Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]]),
("node_handle", Optional[NodeHandle]),
("step_kind_value", Optional[str]),
("logging_tags", Optional[Mapping[str, str]]),
("event_specific_data", Optional["EventSpecificData"]),
("message", Optional[str]),
("pid", Optional[int]),
("step_key", Optional[str]),
],
)
):
"""Events yielded by op and job execution.
Users should not instantiate this class.
Args:
event_type_value (str): Value for a DagsterEventType.
job_name (str)
node_handle (NodeHandle)
step_kind_value (str): Value for a StepKind.
logging_tags (Dict[str, str])
event_specific_data (Any): Type must correspond to event_type_value.
message (str)
pid (int)
step_key (Optional[str]): DEPRECATED
"""
@staticmethod
def from_step(
event_type: "DagsterEventType",
step_context: IStepContext,
event_specific_data: Optional["EventSpecificData"] = None,
message: Optional[str] = None,
batch_metadata: Optional["DagsterEventBatchMetadata"] = None,
) -> "DagsterEvent":
event = DagsterEvent(
event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,
job_name=step_context.job_name,
step_handle=step_context.step.handle,
node_handle=step_context.step.node_handle,
step_kind_value=step_context.step.kind.value,
logging_tags=step_context.event_tags,
event_specific_data=_validate_event_specific_data(event_type, event_specific_data),
message=check.opt_str_param(message, "message"),
pid=os.getpid(),
)
log_step_event(step_context, event, batch_metadata)
return event
@staticmethod
def from_job(
event_type: DagsterEventType,
job_context: IPlanContext,
message: Optional[str] = None,
event_specific_data: Optional["EventSpecificData"] = None,
step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,
) -> "DagsterEvent":
check.opt_inst_param(
step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)
)
event = DagsterEvent(
event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,
job_name=job_context.job_name,
message=check.opt_str_param(message, "message"),
event_specific_data=_validate_event_specific_data(event_type, event_specific_data),
step_handle=step_handle,
pid=os.getpid(),
)
log_job_event(job_context, event)
return event
@staticmethod
def from_resource(
event_type: DagsterEventType,
job_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
message: Optional[str] = None,
event_specific_data: Optional["EngineEventData"] = None,
) -> "DagsterEvent":
event = DagsterEvent(
event_type_value=check.inst_param(event_type, "event_type", DagsterEventType).value,
job_name=job_name,
message=check.opt_str_param(message, "message"),
event_specific_data=_validate_event_specific_data(
DagsterEventType.ENGINE_EVENT, event_specific_data
),
step_handle=execution_plan.step_handle_for_single_step_plans(),
pid=os.getpid(),
)
log_resource_event(log_manager, event)
return event
def __new__(
cls,
event_type_value: str,
job_name: str,
step_handle: Optional[Union[StepHandle, ResolvedFromDynamicStepHandle]] = None,
node_handle: Optional[NodeHandle] = None,
step_kind_value: Optional[str] = None,
logging_tags: Optional[Mapping[str, str]] = None,
event_specific_data: Optional["EventSpecificData"] = None,
message: Optional[str] = None,
pid: Optional[int] = None,
# legacy
step_key: Optional[str] = None,
):
# old events may contain node_handle but not step_handle
if node_handle is not None and step_handle is None:
step_handle = StepHandle(node_handle)
# Legacy events may have step_key set directly, preserve those to stay in sync
# with legacy execution plan snapshots.
if step_handle is not None and step_key is None:
step_key = step_handle.to_key()
return super().__new__(
cls,
check.str_param(event_type_value, "event_type_value"),
check.str_param(job_name, "job_name"),
check.opt_inst_param(
step_handle, "step_handle", (StepHandle, ResolvedFromDynamicStepHandle)
),
check.opt_inst_param(node_handle, "node_handle", NodeHandle),
check.opt_str_param(step_kind_value, "step_kind_value"),
check.opt_mapping_param(logging_tags, "logging_tags"),
_validate_event_specific_data(DagsterEventType(event_type_value), event_specific_data),
check.opt_str_param(message, "message"),
check.opt_int_param(pid, "pid"),
check.opt_str_param(step_key, "step_key"),
)
@property
def node_name(self) -> str:
check.invariant(self.node_handle is not None)
node_handle = cast("NodeHandle", self.node_handle)
return node_handle.name
@public
@property
def event_type(self) -> DagsterEventType:
"""DagsterEventType: The type of this event."""
return DagsterEventType(self.event_type_value)
@public
@property
def is_step_event(self) -> bool:
"""bool: If this event relates to a specific step."""
return self.event_type in STEP_EVENTS
@public
@property
def is_hook_event(self) -> bool:
"""bool: If this event relates to the execution of a hook."""
return self.event_type in HOOK_EVENTS
@property
def is_alert_event(self) -> bool:
return self.event_type in ALERT_EVENTS
@property
def step_kind(self) -> "StepKind":
from dagster._core.execution.plan.step import StepKind
return StepKind(self.step_kind_value)
@public
@property
def is_step_success(self) -> bool:
"""bool: If this event is of type STEP_SUCCESS."""
return self.event_type == DagsterEventType.STEP_SUCCESS
@public
@property
def is_successful_output(self) -> bool:
"""bool: If this event is of type STEP_OUTPUT."""
return self.event_type == DagsterEventType.STEP_OUTPUT
@public
@property
def is_step_start(self) -> bool:
"""bool: If this event is of type STEP_START."""
return self.event_type == DagsterEventType.STEP_START
@public
@property
def is_step_failure(self) -> bool:
"""bool: If this event is of type STEP_FAILURE."""
return self.event_type == DagsterEventType.STEP_FAILURE
@public
@property
def is_resource_init_failure(self) -> bool:
"""bool: If this event is of type RESOURCE_INIT_FAILURE."""
return self.event_type == DagsterEventType.RESOURCE_INIT_FAILURE
@public
@property
def is_step_skipped(self) -> bool:
"""bool: If this event is of type STEP_SKIPPED."""
return self.event_type == DagsterEventType.STEP_SKIPPED
@public
@property
def is_step_up_for_retry(self) -> bool:
"""bool: If this event is of type STEP_UP_FOR_RETRY."""
return self.event_type == DagsterEventType.STEP_UP_FOR_RETRY
@public
@property
def is_step_restarted(self) -> bool:
"""bool: If this event is of type STEP_RESTARTED."""
return self.event_type == DagsterEventType.STEP_RESTARTED
@property
def is_job_success(self) -> bool:
return self.event_type == DagsterEventType.RUN_SUCCESS
@property
def is_job_failure(self) -> bool:
return self.event_type == DagsterEventType.RUN_FAILURE
@property
def is_run_failure(self) -> bool:
return self.event_type == DagsterEventType.RUN_FAILURE
@public
@property
def is_failure(self) -> bool:
"""bool: If this event represents the failure of a run or step."""
return self.event_type in FAILURE_EVENTS
@property
def is_job_event(self) -> bool:
return self.event_type in PIPELINE_EVENTS
@public
@property
def is_engine_event(self) -> bool:
"""bool: If this event is of type ENGINE_EVENT."""
return self.event_type == DagsterEventType.ENGINE_EVENT
@public
@property
def is_handled_output(self) -> bool:
"""bool: If this event is of type HANDLED_OUTPUT."""
return self.event_type == DagsterEventType.HANDLED_OUTPUT
@public
@property
def is_loaded_input(self) -> bool:
"""bool: If this event is of type LOADED_INPUT."""
return self.event_type == DagsterEventType.LOADED_INPUT
@public
@property
def is_step_materialization(self) -> bool:
"""bool: If this event is of type ASSET_MATERIALIZATION."""
return self.event_type == DagsterEventType.ASSET_MATERIALIZATION
@public
@property
def is_expectation_result(self) -> bool:
"""bool: If this event is of type STEP_EXPECTATION_RESULT."""
return self.event_type == DagsterEventType.STEP_EXPECTATION_RESULT
@public
@property
def is_asset_observation(self) -> bool:
"""bool: If this event is of type ASSET_OBSERVATION."""
return self.event_type == DagsterEventType.ASSET_OBSERVATION
@public
@property
def is_asset_materialization_planned(self) -> bool:
"""bool: If this event is of type ASSET_MATERIALIZATION_PLANNED."""
return self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED
@property
def is_asset_failed_to_materialize(self) -> bool:
"""bool: If this event is of type ASSET_FAILED_TO_MATERIALIZE."""
return self.event_type == DagsterEventType.ASSET_FAILED_TO_MATERIALIZE
@public
@property
def asset_key(self) -> Optional[AssetKey]:
"""Optional[AssetKey]: For events that correspond to a specific asset_key / partition
(ASSET_MATERIALIZTION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that
asset key. Otherwise, returns None.
"""
if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:
return self.step_materialization_data.materialization.asset_key
elif self.event_type == DagsterEventType.ASSET_OBSERVATION:
return self.asset_observation_data.asset_observation.asset_key
elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:
return self.asset_materialization_planned_data.asset_key
elif self.event_type == DagsterEventType.ASSET_FAILED_TO_MATERIALIZE:
return self.asset_failed_to_materialize_data.asset_key
elif self.event_type == DagsterEventType.FRESHNESS_STATE_CHANGE:
return self.asset_freshness_state_change_data.key
elif self.event_type == DagsterEventType.ASSET_HEALTH_CHANGED:
return self.asset_health_changed_data.asset_key
elif self.event_type == DagsterEventType.ASSET_WIPED:
return self.asset_wiped_data.asset_key
else:
return None
@public
@property
def partition(self) -> Optional[str]:
"""Optional[AssetKey]: For events that correspond to a specific asset_key / partition
(ASSET_MATERIALIZTION, ASSET_OBSERVATION, ASSET_MATERIALIZATION_PLANNED), returns that
partition. Otherwise, returns None.
"""
if self.event_type == DagsterEventType.ASSET_MATERIALIZATION:
return self.step_materialization_data.materialization.partition
elif self.event_type == DagsterEventType.ASSET_OBSERVATION:
return self.asset_observation_data.asset_observation.partition
elif self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:
return self.asset_materialization_planned_data.partition
elif self.event_type == DagsterEventType.ASSET_FAILED_TO_MATERIALIZE:
return self.asset_failed_to_materialize_data.partition
else:
return None
@property
def partitions_subset(self) -> Optional[PartitionsSubset]:
if self.event_type == DagsterEventType.ASSET_MATERIALIZATION_PLANNED:
return self.asset_materialization_planned_data.partitions_subset
return None
@property
def step_input_data(self) -> "StepInputData":
_assert_type("step_input_data", DagsterEventType.STEP_INPUT, self.event_type)
return cast("StepInputData", self.event_specific_data)
@property
def run_enqueued_data(self) -> Optional["RunEnqueuedData"]:
_assert_type("run_enqueued_data", DagsterEventType.RUN_ENQUEUED, self.event_type)
return cast("Optional[RunEnqueuedData]", self.event_specific_data)
@property
def step_output_data(self) -> StepOutputData:
_assert_type("step_output_data", DagsterEventType.STEP_OUTPUT, self.event_type)
return cast("StepOutputData", self.event_specific_data)
@property
def step_success_data(self) -> "StepSuccessData":
_assert_type("step_success_data", DagsterEventType.STEP_SUCCESS, self.event_type)
return cast("StepSuccessData", self.event_specific_data)
@property
def step_failure_data(self) -> "StepFailureData":
_assert_type("step_failure_data", DagsterEventType.STEP_FAILURE, self.event_type)
return cast("StepFailureData", self.event_specific_data)
@property
def step_retry_data(self) -> "StepRetryData":
_assert_type("step_retry_data", DagsterEventType.STEP_UP_FOR_RETRY, self.event_type)
return cast("StepRetryData", self.event_specific_data)
@property
def step_materialization_data(self) -> "StepMaterializationData":
_assert_type(
"step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type
)
return cast("StepMaterializationData", self.event_specific_data)
@property
def asset_observation_data(self) -> "AssetObservationData":
_assert_type("asset_observation_data", DagsterEventType.ASSET_OBSERVATION, self.event_type)
return cast("AssetObservationData", self.event_specific_data)
@property
def asset_materialization_planned_data(self) -> "AssetMaterializationPlannedData":
_assert_type(
"asset_materialization_planned",
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
self.event_type,
)
return cast("AssetMaterializationPlannedData", self.event_specific_data)
@property
def asset_check_planned_data(self) -> "AssetCheckEvaluationPlanned":
_assert_type(
"asset_check_planned",
DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED,
self.event_type,
)
return cast("AssetCheckEvaluationPlanned", self.event_specific_data)
@property
def asset_failed_to_materialize_data(
self,
) -> "AssetFailedToMaterializeData":
_assert_type(
"asset_failed_to_materialize_data",
DagsterEventType.ASSET_FAILED_TO_MATERIALIZE,
self.event_type,
)
return cast("AssetFailedToMaterializeData", self.event_specific_data)
@property
def asset_freshness_state_change_data(
self,
) -> "FreshnessStateChange":
_assert_type(
"asset_freshness_state_change_data",
DagsterEventType.FRESHNESS_STATE_CHANGE,
self.event_type,
)
return cast("FreshnessStateChange", self.event_specific_data)
@property
def asset_health_changed_data(
self,
) -> "AssetHealthChangedData":
_assert_type(
"asset_health_changed_data",
DagsterEventType.ASSET_HEALTH_CHANGED,
self.event_type,
)
return cast("AssetHealthChangedData", self.event_specific_data)
@property
def asset_wiped_data(
self,
) -> "AssetWipedData":
_assert_type(
"asset_wiped_data",
DagsterEventType.ASSET_WIPED,
self.event_type,
)
return cast("AssetWipedData", self.event_specific_data)
@property
def step_expectation_result_data(self) -> "StepExpectationResultData":
_assert_type(
"step_expectation_result_data",
DagsterEventType.STEP_EXPECTATION_RESULT,
self.event_type,
)
return cast("StepExpectationResultData", self.event_specific_data)
@property
def materialization(self) -> AssetMaterialization:
_assert_type(
"step_materialization_data", DagsterEventType.ASSET_MATERIALIZATION, self.event_type
)
return cast("StepMaterializationData", self.event_specific_data).materialization
@property
def asset_check_evaluation_data(self) -> AssetCheckEvaluation:
_assert_type(
"asset_check_evaluation", DagsterEventType.ASSET_CHECK_EVALUATION, self.event_type
)
return cast("AssetCheckEvaluation", self.event_specific_data)
@property
def job_failure_data(self) -> "JobFailureData":
_assert_type("job_failure_data", DagsterEventType.RUN_FAILURE, self.event_type)
return cast("JobFailureData", self.event_specific_data)
@property
def job_canceled_data(self) -> "JobCanceledData":
_assert_type("job_canceled_data", DagsterEventType.RUN_CANCELED, self.event_type)
return cast("JobCanceledData", self.event_specific_data)
@property
def engine_event_data(self) -> "EngineEventData":
_assert_type(
"engine_event_data",
[
DagsterEventType.ENGINE_EVENT,
DagsterEventType.RESOURCE_INIT_STARTED,
DagsterEventType.RESOURCE_INIT_SUCCESS,
DagsterEventType.RESOURCE_INIT_FAILURE,
DagsterEventType.STEP_WORKER_STARTED,
DagsterEventType.STEP_WORKER_STARTING,
],
self.event_type,
)
return cast("EngineEventData", self.event_specific_data)
@property
def hook_completed_data(self) -> Optional["EventSpecificData"]:
_assert_type("hook_completed_data", DagsterEventType.HOOK_COMPLETED, self.event_type)
return self.event_specific_data
@property
def hook_errored_data(self) -> "HookErroredData":
_assert_type("hook_errored_data", DagsterEventType.HOOK_ERRORED, self.event_type)
return cast("HookErroredData", self.event_specific_data)
@property
def hook_skipped_data(self) -> Optional["EventSpecificData"]:
_assert_type("hook_skipped_data", DagsterEventType.HOOK_SKIPPED, self.event_type)
return self.event_specific_data
@property
def logs_captured_data(self) -> "ComputeLogsCaptureData":
_assert_type("logs_captured_data", DagsterEventType.LOGS_CAPTURED, self.event_type)
return cast("ComputeLogsCaptureData", self.event_specific_data)
@staticmethod
def step_output_event(
step_context: StepExecutionContext, step_output_data: StepOutputData
) -> "DagsterEvent":
output_def = step_context.op.output_def_named(
step_output_data.step_output_handle.output_name
)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_OUTPUT,
step_context=step_context,
event_specific_data=step_output_data,
message=(
'Yielded output "{output_name}"{mapping_clause} of type'
' "{output_type}".{type_check_clause}'.format(
output_name=step_output_data.step_output_handle.output_name,
output_type=output_def.dagster_type.display_name,
type_check_clause=(
(
" Warning! Type check failed."
if not step_output_data.type_check_data.success
else " (Type check passed)."
)
if step_output_data.type_check_data
else " (No type check)."
),
mapping_clause=(
f' mapping key "{step_output_data.step_output_handle.mapping_key}"'
if step_output_data.step_output_handle.mapping_key
else ""
),
)
),
)
@staticmethod
def step_failure_event(
step_context: IStepContext,
step_failure_data: "StepFailureData",
message=None,
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_FAILURE,
step_context=step_context,
event_specific_data=step_failure_data,
message=(message or f'Execution of step "{step_context.step.key}" failed.'),
)
@staticmethod
def step_retry_event(
step_context: IStepContext, step_retry_data: "StepRetryData"
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_UP_FOR_RETRY,
step_context=step_context,
event_specific_data=step_retry_data,
message=(
'Execution of step "{step_key}" failed and has requested a retry{wait_str}.'.format(
step_key=step_context.step.key,
wait_str=(
f" in {step_retry_data.seconds_to_wait} seconds"
if step_retry_data.seconds_to_wait
else ""
),
)
),
)
@staticmethod
def step_input_event(
step_context: StepExecutionContext, step_input_data: "StepInputData"
) -> "DagsterEvent":
input_def = step_context.op_def.input_def_named(step_input_data.input_name)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_INPUT,
step_context=step_context,
event_specific_data=step_input_data,
message='Got input "{input_name}" of type "{input_type}".{type_check_clause}'.format(
input_name=step_input_data.input_name,
input_type=input_def.dagster_type.display_name,
type_check_clause=(
(
" Warning! Type check failed."
if not step_input_data.type_check_data.success
else " (Type check passed)."
)
if step_input_data.type_check_data
else " (No type check)."
),
),
)
@staticmethod
def step_start_event(step_context: IStepContext) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_START,
step_context=step_context,
message=f'Started execution of step "{step_context.step.key}".',
)
@staticmethod
def step_restarted_event(step_context: IStepContext, previous_attempts: int) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_RESTARTED,
step_context=step_context,
message=f'Started re-execution (attempt # {previous_attempts + 1}) of step "{step_context.step.key}".',
)
@staticmethod
def step_success_event(
step_context: IStepContext, success: "StepSuccessData"
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_SUCCESS,
step_context=step_context,
event_specific_data=success,
message=f'Finished execution of step "{step_context.step.key}" in {format_duration(success.duration_ms)}.',
)
@staticmethod
def step_skipped_event(step_context: IStepContext) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_SKIPPED,
step_context=step_context,
message=f'Skipped execution of step "{step_context.step.key}".',
)
@staticmethod
def asset_materialization(
step_context: IStepContext,
materialization: AssetMaterialization,
batch_metadata: Optional[DagsterEventBatchMetadata] = None,
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.ASSET_MATERIALIZATION,
step_context=step_context,
event_specific_data=StepMaterializationData(materialization),
message=(
materialization.description
if materialization.description
else "Materialized value{label_clause}.".format(
label_clause=f" {materialization.label}" if materialization.label else ""
)
),
batch_metadata=batch_metadata,
)
@staticmethod
def asset_observation(
step_context: IStepContext,
observation: AssetObservation,
batch_metadata: Optional[DagsterEventBatchMetadata] = None,
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.ASSET_OBSERVATION,
step_context=step_context,
event_specific_data=AssetObservationData(observation),
batch_metadata=batch_metadata,
)
@staticmethod
def asset_check_evaluation(
step_context: IStepContext, asset_check_evaluation: AssetCheckEvaluation
) -> "DagsterEvent":
return DagsterEvent.from_step(
event_type=DagsterEventType.ASSET_CHECK_EVALUATION,
step_context=step_context,
event_specific_data=asset_check_evaluation,
message=f"Asset check '{asset_check_evaluation.check_name}' on '{asset_check_evaluation.asset_key.to_user_string()}' "
+ ("passed." if asset_check_evaluation.passed else "did not pass.")
+ (
""
if asset_check_evaluation.description is None
else f" Description: '{asset_check_evaluation.description}'"
),
)
@staticmethod
def step_expectation_result(
step_context: IStepContext, expectation_result: ExpectationResult
) -> "DagsterEvent":
def _msg():
if expectation_result.description:
return expectation_result.description
return "Expectation{label_clause} {result_verb}".format(
label_clause=" " + expectation_result.label if expectation_result.label else "",
result_verb="passed" if expectation_result.success else "failed",
)
return DagsterEvent.from_step(
event_type=DagsterEventType.STEP_EXPECTATION_RESULT,
step_context=step_context,
event_specific_data=StepExpectationResultData(expectation_result),
message=_msg(),
)
@staticmethod
def step_concurrency_blocked(
step_context: IStepContext, concurrency_key: str, initial=True
) -> "DagsterEvent":
message = (
f"Step blocked by limit for pool {concurrency_key}"
if initial
else f"Step still blocked by limit for pool {concurrency_key}"
)
return DagsterEvent.from_step(
event_type=DagsterEventType.ENGINE_EVENT,
step_context=step_context,
message=message,
event_specific_data=EngineEventData(
metadata={"pool": MetadataValue.pool(concurrency_key)}
),
)
@staticmethod
def job_enqueue(run: DagsterRun) -> "DagsterEvent":
remote_job_origin = run.remote_job_origin
if remote_job_origin:
loc_name = remote_job_origin.location_name
repo_name = remote_job_origin.repository_origin.repository_name
event_data = RunEnqueuedData(
code_location_name=loc_name,
repository_name=repo_name,
partition_key=run.tags.get(PARTITION_NAME_TAG),
)
else:
event_data = None
return DagsterEvent(
event_type_value=DagsterEventType.RUN_ENQUEUED.value,
job_name=run.job_name,
event_specific_data=event_data,
)
@staticmethod
def job_start(job_context: IPlanContext) -> "DagsterEvent":
return DagsterEvent.from_job(
DagsterEventType.RUN_START,
job_context,
message=f'Started execution of run for "{job_context.job_name}".',
)
@staticmethod
def job_success(job_context: IPlanContext) -> "DagsterEvent":
return DagsterEvent.from_job(
DagsterEventType.RUN_SUCCESS,
job_context,
message=f'Finished execution of run for "{job_context.job_name}".',
)
@staticmethod
def job_failure(
job_context_or_name: Union[IPlanContext, str],
context_msg: str,
failure_reason: RunFailureReason,
error_info: Optional[SerializableErrorInfo] = None,
first_step_failure_event: Optional["DagsterEvent"] = None,
) -> "DagsterEvent":
check.str_param(context_msg, "context_msg")
if isinstance(job_context_or_name, IPlanContext):
return DagsterEvent.from_job(
DagsterEventType.RUN_FAILURE,
job_context_or_name,
message=(
f'Execution of run for "{job_context_or_name.job_name}" failed. {context_msg}'
),
event_specific_data=JobFailureData(
error_info,
failure_reason=failure_reason,
first_step_failure_event=first_step_failure_event,
),
)
else:
# when the failure happens trying to bring up context, the job_context hasn't been
# built and so can't use from_pipeline
check.str_param(job_context_or_name, "pipeline_name")
event = DagsterEvent(
event_type_value=DagsterEventType.RUN_FAILURE.value,
job_name=job_context_or_name,
event_specific_data=JobFailureData(error_info, failure_reason=failure_reason),
message=f'Execution of run for "{job_context_or_name}" failed. {context_msg}',
pid=os.getpid(),
)
return event
@staticmethod
def job_canceled(
job_context: IPlanContext,
error_info: Optional[SerializableErrorInfo] = None,
message: Optional[str] = None,
) -> "DagsterEvent":
return DagsterEvent.from_job(
DagsterEventType.RUN_CANCELED,
job_context,
message=message or f'Execution of run for "{job_context.job_name}" canceled.',
event_specific_data=JobCanceledData(
check.opt_inst_param(error_info, "error_info", SerializableErrorInfo)
),
)
@staticmethod
def step_worker_starting(
step_context: IStepContext,
message: str,
metadata: Mapping[str, RawMetadataValue],
) -> "DagsterEvent":
return DagsterEvent.from_step(
DagsterEventType.STEP_WORKER_STARTING,
step_context,
message=message,
event_specific_data=EngineEventData(
metadata=metadata, marker_start="step_process_start"
),
)
@staticmethod
def step_worker_started(
log_manager: DagsterLogManager,
job_name: str,
message: str,
metadata: Mapping[str, RawMetadataValue],
step_key: Optional[str],
) -> "DagsterEvent":
event = DagsterEvent(
DagsterEventType.STEP_WORKER_STARTED.value,
job_name=job_name,
message=message,
event_specific_data=EngineEventData(metadata=metadata, marker_end="step_process_start"),
pid=os.getpid(),
step_key=step_key,
)
log_manager.log_dagster_event(
level=logging.DEBUG,
msg=message,
dagster_event=event,
)
return event
@staticmethod
def resource_init_start(
job_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
resource_keys: AbstractSet[str],
) -> "DagsterEvent":
return DagsterEvent.from_resource(
DagsterEventType.RESOURCE_INIT_STARTED,
job_name=job_name,
execution_plan=execution_plan,
log_manager=log_manager,
message="Starting initialization of resources [{}].".format(
", ".join(sorted(resource_keys))
),
event_specific_data=EngineEventData(metadata={}, marker_start="resources"),
)
@staticmethod
def resource_init_success(
job_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
resource_instances: Mapping[str, Any],
resource_init_times: Mapping[str, str],
) -> "DagsterEvent":
metadata = {}
for key in resource_instances.keys():
metadata[key] = MetadataValue.python_artifact(resource_instances[key].__class__)
metadata[f"{key}:init_time"] = resource_init_times[key]
return DagsterEvent.from_resource(
DagsterEventType.RESOURCE_INIT_SUCCESS,
job_name=job_name,
execution_plan=execution_plan,
log_manager=log_manager,
message="Finished initialization of resources [{}].".format(
", ".join(sorted(resource_init_times.keys()))
),
event_specific_data=EngineEventData(
metadata=metadata,
marker_end="resources",
),
)
@staticmethod
def resource_init_failure(
job_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
resource_keys: AbstractSet[str],
error: SerializableErrorInfo,
) -> "DagsterEvent":
return DagsterEvent.from_resource(
DagsterEventType.RESOURCE_INIT_FAILURE,
job_name=job_name,
execution_plan=execution_plan,
log_manager=log_manager,
message="Initialization of resources [{}] failed.".format(", ".join(resource_keys)),
event_specific_data=EngineEventData(
metadata={},
marker_end="resources",
error=error,
),
)
@staticmethod
def resource_teardown_failure(
job_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
resource_keys: AbstractSet[str],
error: SerializableErrorInfo,
) -> "DagsterEvent":
return DagsterEvent.from_resource(
DagsterEventType.ENGINE_EVENT,
job_name=job_name,
execution_plan=execution_plan,
log_manager=log_manager,
message="Teardown of resources [{}] failed.".format(", ".join(resource_keys)),
event_specific_data=EngineEventData(
metadata={},
marker_start=None,
marker_end=None,
error=error,
),
)
@staticmethod
def engine_event(
plan_context: IPlanContext,
message: str,
event_specific_data: Optional["EngineEventData"] = None,
) -> "DagsterEvent":
if isinstance(plan_context, IStepContext):
return DagsterEvent.from_step(
DagsterEventType.ENGINE_EVENT,
step_context=plan_context,
event_specific_data=event_specific_data,
message=message,
)
else:
return DagsterEvent.from_job(
DagsterEventType.ENGINE_EVENT,
plan_context,
message,
event_specific_data=event_specific_data,
)
@staticmethod
def object_store_operation(
step_context: IStepContext, object_store_operation_result: "ObjectStoreOperation"
) -> "DagsterEvent":
object_store_name = (
f"{object_store_operation_result.object_store_name} "
if object_store_operation_result.object_store_name
else ""
)
serialization_strategy_modifier = (
f" using {object_store_operation_result.serialization_strategy_name}"
if object_store_operation_result.serialization_strategy_name
else ""
)
value_name = object_store_operation_result.value_name
if (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.SET_OBJECT
):
message = (
f"Stored intermediate object for output {value_name} in "
f"{object_store_name}object store{serialization_strategy_modifier}."
)
elif (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.GET_OBJECT
):
message = (
f"Retrieved intermediate object for input {value_name} in "
f"{object_store_name}object store{serialization_strategy_modifier}."
)
elif (
ObjectStoreOperationType(object_store_operation_result.op)
== ObjectStoreOperationType.CP_OBJECT
):
message = f"Copied intermediate object for input {value_name} from {object_store_operation_result.key} to {object_store_operation_result.dest_key}"
else:
message = ""
return DagsterEvent.from_step(
DagsterEventType.OBJECT_STORE_OPERATION,
step_context,
event_specific_data=ObjectStoreOperationResultData(
op=object_store_operation_result.op,
value_name=value_name,
address=object_store_operation_result.key,
metadata={"key": MetadataValue.path(object_store_operation_result.key)},
version=object_store_operation_result.version,
mapping_key=object_store_operation_result.mapping_key,
),
message=message,
)
@staticmethod
def handled_output(
step_context: IStepContext,
output_name: str,
manager_key: str,
message_override: Optional[str] = None,
metadata: Optional[Mapping[str, MetadataValue]] = None,
) -> "DagsterEvent":
message = f'Handled output "{output_name}" using IO manager "{manager_key}"'
return DagsterEvent.from_step(
event_type=DagsterEventType.HANDLED_OUTPUT,
step_context=step_context,
event_specific_data=HandledOutputData(
output_name=output_name,
manager_key=manager_key,
metadata=metadata if metadata else {},
),
message=message_override or message,
)
@staticmethod
def loaded_input(
step_context: IStepContext,
input_name: str,
manager_key: str,
upstream_output_name: Optional[str] = None,
upstream_step_key: Optional[str] = None,
message_override: Optional[str] = None,
metadata: Optional[Mapping[str, MetadataValue]] = None,
) -> "DagsterEvent":
message = f'Loaded input "{input_name}" using input manager "{manager_key}"'
if upstream_output_name:
message += f', from output "{upstream_output_name}" of step "{upstream_step_key}"'
return DagsterEvent.from_step(
event_type=DagsterEventType.LOADED_INPUT,
step_context=step_context,
event_specific_data=LoadedInputData(
input_name=input_name,
manager_key=manager_key,
upstream_output_name=upstream_output_name,
upstream_step_key=upstream_step_key,
metadata=metadata if metadata else {},
),
message=message_override or message,
)
@staticmethod
def hook_completed(
step_context: StepExecutionContext, hook_def: HookDefinition
) -> "DagsterEvent":
event_type = DagsterEventType.HOOK_COMPLETED
event = DagsterEvent(
event_type_value=event_type.value,
job_name=step_context.job_name,
step_handle=step_context.step.handle,
node_handle=step_context.step.node_handle,
step_kind_value=step_context.step.kind.value,
logging_tags=step_context.event_tags,
message=(
f'Finished the execution of hook "{hook_def.name}" triggered for'
f' "{step_context.op.name}".'
),
)
step_context.log.log_dagster_event(
level=logging.DEBUG, msg=event.message or "", dagster_event=event
)
return event
@staticmethod
def hook_errored(
step_context: StepExecutionContext, error: HookExecutionError
) -> "DagsterEvent":
event_type = DagsterEventType.HOOK_ERRORED
event = DagsterEvent(
event_type_value=event_type.value,
job_name=step_context.job_name,
step_handle=step_context.step.handle,
node_handle=step_context.step.node_handle,
step_kind_value=step_context.step.kind.value,
logging_tags=step_context.event_tags,
event_specific_data=_validate_event_specific_data(
event_type,
HookErroredData(
error=serializable_error_info_from_exc_info(error.original_exc_info)
),
),
)
step_context.log.log_dagster_event(level=logging.ERROR, msg=str(error), dagster_event=event)
return event
@staticmethod
def hook_skipped(
step_context: StepExecutionContext, hook_def: HookDefinition
) -> "DagsterEvent":
event_type = DagsterEventType.HOOK_SKIPPED
event = DagsterEvent(
event_type_value=event_type.value,
job_name=step_context.job_name,
step_handle=step_context.step.handle,
node_handle=step_context.step.node_handle,
step_kind_value=step_context.step.kind.value,
logging_tags=step_context.event_tags,
message=(
f'Skipped the execution of hook "{hook_def.name}". It did not meet its triggering '
f'condition during the execution of "{step_context.op.name}".'
),
)
step_context.log.log_dagster_event(
level=logging.DEBUG, msg=event.message or "", dagster_event=event
)
return event
@staticmethod
def legacy_compute_log_step_event(step_context: StepExecutionContext):
step_key = step_context.step.key
return DagsterEvent.from_step(
DagsterEventType.LOGS_CAPTURED,
step_context,
message=f"Started capturing logs for step: {step_key}.",
event_specific_data=ComputeLogsCaptureData(
step_keys=[step_key],
file_key=step_key,
),
)
@staticmethod
def capture_logs(
job_context: IPlanContext,
step_keys: Sequence[str],
log_key: Sequence[str],
log_context: CapturedLogContext,
):
file_key = log_key[-1]
return DagsterEvent.from_job(
DagsterEventType.LOGS_CAPTURED,
job_context,
message=f"Started capturing logs in process (pid: {os.getpid()}).",
event_specific_data=ComputeLogsCaptureData(
step_keys=step_keys,
file_key=file_key,
external_stdout_url=log_context.external_stdout_url,
external_stderr_url=log_context.external_stderr_url,
shell_cmd=log_context.shell_cmd,
external_url=log_context.external_url,
),
)
@staticmethod
def build_asset_materialization_planned_event(
job_name: str,
step_key: str,
asset_materialization_planned_data: "AssetMaterializationPlannedData",
) -> "DagsterEvent":
"""Constructs an asset materialization planned event, to be logged by the caller."""
event = DagsterEvent(
event_type_value=DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value,
job_name=job_name,
message=(
f"{job_name} intends to materialize asset {asset_materialization_planned_data.asset_key.to_string()}"
),
event_specific_data=asset_materialization_planned_data,
step_key=step_key,
)
return event
@staticmethod
def build_asset_failed_to_materialize_event(
job_name: str,
step_key: Optional[str],
asset_materialization_failure: "AssetMaterializationFailure",
error: Optional[SerializableErrorInfo] = None,
) -> "DagsterEvent":
return DagsterEvent(
event_type_value=DagsterEventType.ASSET_FAILED_TO_MATERIALIZE.value,
job_name=job_name,
message=f"Asset {asset_materialization_failure.asset_key.to_string()} failed to materialize",
event_specific_data=AssetFailedToMaterializeData(
asset_materialization_failure, error=error
),
step_key=step_key,
)
def get_step_output_event(
events: Sequence[DagsterEvent], step_key: str, output_name: Optional[str] = "result"
) -> Optional["DagsterEvent"]:
check.sequence_param(events, "events", of_type=DagsterEvent)
check.str_param(step_key, "step_key")
check.str_param(output_name, "output_name")
for event in events:
if (
event.event_type == DagsterEventType.STEP_OUTPUT
and event.step_key == step_key
and event.step_output_data.output_name == output_name
):
return event
return None
@whitelist_for_serdes
| DagsterEvent |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/dashboard.py | {
"start": 47704,
"end": 48073
} | class ____(DashboardDetailsSerializer):
title = serializers.CharField(
required=True, max_length=255, help_text="The user defined title for this dashboard."
)
is_favorited = serializers.BooleanField(
required=False,
default=False,
help_text="Favorite the dashboard automatically for the request user",
)
| DashboardSerializer |
python | huggingface__transformers | src/transformers/models/owlvit/configuration_owlvit.py | {
"start": 784,
"end": 5502
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an
OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the OwlViT
[google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented
by the `inputs_ids` passed when calling [`OwlViTTextModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 16):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token in the input sequences.
bos_token_id (`int`, *optional*, defaults to 49406):
The id of the beginning-of-sequence token in the input sequences.
eos_token_id (`int`, *optional*, defaults to 49407):
The id of the end-of-sequence token in the input sequences.
Example:
```python
>>> from transformers import OwlViTTextConfig, OwlViTTextModel
>>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration
>>> configuration = OwlViTTextConfig()
>>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration
>>> model = OwlViTTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "owlvit_text_model"
base_config_key = "text_config"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=16,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
pad_token_id=0,
bos_token_id=49406,
eos_token_id=49407,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
| OwlViTTextConfig |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 43358,
"end": 49340
} | class ____:
def setup_method(self):
np.random.seed(1234)
self.x = np.random.randn(100, 1)
self.ckdt = self.kdtree_type(self.x)
def test_return_sorted_True(self):
idxs_list = self.ckdt.query_ball_point(self.x, 1., return_sorted=True)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
for xi in self.x:
idxs = self.ckdt.query_ball_point(xi, 1., return_sorted=True)
assert_array_equal(idxs, sorted(idxs))
def test_return_sorted_None(self):
"""Previous behavior was to sort the returned indices if there were
multiple points per query but not sort them if there was a single point
per query."""
idxs_list = self.ckdt.query_ball_point(self.x, 1.)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
idxs_list_single = [self.ckdt.query_ball_point(xi, 1.) for xi in self.x]
idxs_list_False = self.ckdt.query_ball_point(self.x, 1., return_sorted=False)
for idxs0, idxs1 in zip(idxs_list_False, idxs_list_single):
assert_array_equal(idxs0, idxs1)
def test_kdtree_complex_data():
# Test that KDTree rejects complex input points (gh-9108)
points = np.random.rand(10, 2).view(complex)
with pytest.raises(TypeError, match="complex data"):
t = KDTree(points)
t = KDTree(points.real)
with pytest.raises(TypeError, match="complex data"):
t.query(points)
with pytest.raises(TypeError, match="complex data"):
t.query_ball_point(points, r=1)
def test_kdtree_tree_access():
# Test KDTree.tree can be used to traverse the KDTree
np.random.seed(1234)
points = np.random.rand(100, 4)
t = KDTree(points)
root = t.tree
assert isinstance(root, KDTree.innernode)
assert root.children == points.shape[0]
# Visit the tree and assert some basic properties for each node
nodes = [root]
while nodes:
n = nodes.pop(-1)
if isinstance(n, KDTree.leafnode):
assert isinstance(n.children, int)
assert n.children == len(n.idx)
assert_array_equal(points[n.idx], n._node.data_points)
else:
assert isinstance(n, KDTree.innernode)
assert isinstance(n.split_dim, int)
assert 0 <= n.split_dim < t.m
assert isinstance(n.split, float)
assert isinstance(n.children, int)
assert n.children == n.less.children + n.greater.children
nodes.append(n.greater)
nodes.append(n.less)
def test_kdtree_attributes():
# Test KDTree's attributes are available
np.random.seed(1234)
points = np.random.rand(100, 4)
t = KDTree(points)
assert isinstance(t.m, int)
assert t.n == points.shape[0]
assert isinstance(t.n, int)
assert t.m == points.shape[1]
assert isinstance(t.leafsize, int)
assert t.leafsize == 10
assert_array_equal(t.maxes, np.amax(points, axis=0))
assert_array_equal(t.mins, np.amin(points, axis=0))
assert t.data is points
@pytest.mark.parametrize("kdtree_class", [KDTree, cKDTree])
def test_kdtree_count_neighbors_weighted(kdtree_class):
rng = np.random.RandomState(1234)
r = np.arange(0.05, 1, 0.05)
A = rng.random(21).reshape((7,3))
B = rng.random(45).reshape((15,3))
wA = rng.random(7)
wB = rng.random(15)
kdA = kdtree_class(A)
kdB = kdtree_class(B)
nAB = kdA.count_neighbors(kdB, r, cumulative=False, weights=(wA,wB))
# Compare against brute-force
weights = wA[None, :] * wB[:, None]
dist = np.linalg.norm(A[None, :, :] - B[:, None, :], axis=-1)
expect = [np.sum(weights[(prev_radius < dist) & (dist <= radius)])
for prev_radius, radius in zip(itertools.chain([0], r[:-1]), r)]
assert_allclose(nAB, expect)
def test_kdtree_nan():
vals = [1, 5, -10, 7, -4, -16, -6, 6, 3, -11]
n = len(vals)
data = np.concatenate([vals, np.full(n, np.nan)])[:, None]
with pytest.raises(ValueError, match="must be finite"):
KDTree(data)
def test_nonfinite_inputs_gh_18223():
rng = np.random.default_rng(12345)
coords = rng.uniform(size=(100, 3), low=0.0, high=0.1)
t = KDTree(coords, balanced_tree=False, compact_nodes=False)
bad_coord = [np.nan for _ in range(3)]
with pytest.raises(ValueError, match="must be finite"):
t.query(bad_coord)
with pytest.raises(ValueError, match="must be finite"):
t.query_ball_point(bad_coord, 1)
coords[0, :] = np.nan
with pytest.raises(ValueError, match="must be finite"):
KDTree(coords, balanced_tree=True, compact_nodes=False)
with pytest.raises(ValueError, match="must be finite"):
KDTree(coords, balanced_tree=False, compact_nodes=True)
with pytest.raises(ValueError, match="must be finite"):
KDTree(coords, balanced_tree=True, compact_nodes=True)
with pytest.raises(ValueError, match="must be finite"):
KDTree(coords, balanced_tree=False, compact_nodes=False)
@pytest.mark.parametrize("incantation", [cKDTree, KDTree])
def test_gh_18800(incantation):
# our prohibition on non-finite values
# in kd-tree workflows means we need
# coercion to NumPy arrays enforced
class ArrLike(np.ndarray):
def __new__(cls, input_array):
obj = np.asarray(input_array).view(cls)
# we override all() to mimic the problem
# pandas DataFrames encountered in gh-18800
obj.all = None
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.all = getattr(obj, 'all', None)
points = [
[66.22, 32.54],
[22.52, 22.39],
[31.01, 81.21],
]
arr = np.array(points)
arr_like = ArrLike(arr)
tree = incantation(points, 10)
tree.query(arr_like, 1)
tree.query_ball_point(arr_like, 200)
| _Test_sorted_query_ball_point |
python | ansible__ansible | lib/ansible/executor/module_common.py | {
"start": 28282,
"end": 28476
} | class ____:
@classmethod
def __post_init__(cls):
_dataclass_validation.inject_post_init_validation(cls)
@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
| ModuleMetadata |
python | langchain-ai__langchain | libs/core/langchain_core/callbacks/manager.py | {
"start": 23354,
"end": 26027
} | class ____(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
def get_sync(self) -> CallbackManagerForLLMRun:
"""Get the equivalent sync RunManager.
Returns:
The sync RunManager.
"""
return CallbackManagerForLLMRun(
run_id=self.run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_llm_new_token(
self,
token: str,
*,
chunk: GenerationChunk | ChatGenerationChunk | None = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token: The new token.
chunk: The chunk.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
await ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
@shielded
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response: The LLM result.
**kwargs: Additional keyword arguments.
"""
if not self.handlers:
return
await ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
@shielded
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error: The error.
**kwargs: Additional keyword arguments.
- response (LLMResult): The response which was generated before
the error occurred.
"""
if not self.handlers:
return
await ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
| AsyncCallbackManagerForLLMRun |
python | openai__openai-python | src/openai/cli/_api/fine_tuning/jobs.py | {
"start": 3546,
"end": 3676
} | class ____(BaseModel):
id: str
after: Omittable[str] = omit
limit: Omittable[int] = omit
| CLIFineTuningJobsListEventsArgs |
python | doocs__leetcode | solution/0400-0499/0419.Battleships in a Board/Solution.py | {
"start": 0,
"end": 470
} | class ____:
def countBattleships(self, board: List[List[str]]) -> int:
m, n = len(board), len(board[0])
ans = 0
for i in range(m):
for j in range(n):
if board[i][j] == '.':
continue
if i > 0 and board[i - 1][j] == 'X':
continue
if j > 0 and board[i][j - 1] == 'X':
continue
ans += 1
return ans
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 2279,
"end": 3157
} | class ____:
monotonic_list = list(range(10**6))
non_monotonic_list = list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))
params = [
("Int64", "UInt64", "Float64"),
(True, False),
]
param_names = ["dtype", "monotonic"]
def setup(self, dtype, monotonic):
indices = {
True: Index(self.monotonic_list, dtype=dtype),
False: Index(self.non_monotonic_list, dtype=dtype).append(
Index([NA], dtype=dtype)
),
}
self.data = indices[monotonic]
self.indexer = np.arange(300, 1_000)
self.data_dups = self.data.append(self.data)
def time_get_indexer(self, dtype, monotonic):
self.data.get_indexer(self.indexer)
def time_get_indexer_dups(self, dtype, monotonic):
self.data.get_indexer_for(self.indexer)
| NumericMaskedIndexing |
python | scipy__scipy | scipy/special/tests/test_orthogonal.py | {
"start": 294,
"end": 2722
} | class ____:
def test_chebyc(self):
C0 = orth.chebyc(0)
C1 = orth.chebyc(1)
with np.errstate(all='ignore'):
C2 = orth.chebyc(2)
C3 = orth.chebyc(3)
C4 = orth.chebyc(4)
C5 = orth.chebyc(5)
assert_allclose(C0.c, [2], atol=1.5e-13, rtol=0)
assert_allclose(C1.c, [1, 0], atol=1.5e-13, rtol=0)
assert_allclose(C2.c, [1, 0, -2], atol=1.5e-13, rtol=0)
assert_allclose(C3.c, [1, 0, -3, 0], atol=1.5e-13, rtol=0)
assert_allclose(C4.c, [1, 0, -4, 0, 2], atol=1.5e-13, rtol=0)
assert_allclose(C5.c, [1, 0, -5, 0, 5, 0],atol=1.5e-13, rtol=0)
def test_chebys(self):
S0 = orth.chebys(0)
S1 = orth.chebys(1)
S2 = orth.chebys(2)
S3 = orth.chebys(3)
S4 = orth.chebys(4)
S5 = orth.chebys(5)
assert_allclose(S0.c, [1], atol=1.5e-13, rtol=0)
assert_allclose(S1.c, [1, 0], atol=1.5e-13, rtol=0)
assert_allclose(S2.c, [1, 0, -1], atol=1.5e-13, rtol=0)
assert_allclose(S3.c, [1, 0, -2, 0], atol=1.5e-13, rtol=0)
assert_allclose(S4.c, [1, 0, -3, 0, 1], atol=1.5e-13, rtol=0)
assert_allclose(S5.c, [1, 0, -4, 0, 3, 0], atol=1.5e-13, rtol=0)
def test_chebyt(self):
T0 = orth.chebyt(0)
T1 = orth.chebyt(1)
T2 = orth.chebyt(2)
T3 = orth.chebyt(3)
T4 = orth.chebyt(4)
T5 = orth.chebyt(5)
assert_allclose(T0.c, [1], atol=1.5e-13, rtol=0)
assert_allclose(T1.c, [1, 0], atol=1.5e-13, rtol=0)
assert_allclose(T2.c, [2, 0, -1], atol=1.5e-13, rtol=0)
assert_allclose(T3.c, [4, 0, -3, 0], atol=1.5e-13, rtol=0)
assert_allclose(T4.c, [8, 0, -8, 0, 1], atol=1.5e-13, rtol=0)
assert_allclose(T5.c, [16, 0, -20, 0, 5, 0], atol=1.5e-13, rtol=0)
def test_chebyu(self):
U0 = orth.chebyu(0)
U1 = orth.chebyu(1)
U2 = orth.chebyu(2)
U3 = orth.chebyu(3)
U4 = orth.chebyu(4)
U5 = orth.chebyu(5)
assert_allclose(U0.c, [1], atol=1.5e-13, rtol=0)
assert_allclose(U1.c, [2, 0], atol=1.5e-13, rtol=0)
assert_allclose(U2.c, [4, 0, -1], atol=1.5e-13, rtol=0)
assert_allclose(U3.c, [8, 0, -4, 0], atol=1.5e-13, rtol=0)
assert_allclose(U4.c, [16, 0, -12, 0, 1], atol=1.5e-13, rtol=0)
assert_allclose(U5.c, [32, 0, -32, 0, 6, 0], atol=1.5e-13, rtol=0)
| TestCheby |
python | graphql-python__graphene | graphene/relay/tests/test_custom_global_id.py | {
"start": 7224,
"end": 10349
} | class ____:
def setup_method(self):
self.user_list = [
{"id": 1, "name": "First"},
{"id": 2, "name": "Second"},
{"id": 3, "name": "Third"},
{"id": 4, "name": "Fourth"},
]
self.users = {user["id"]: user for user in self.user_list}
def test_must_define_to_global_id(self):
"""
Test that if the `to_global_id` method is not defined, we can query the object, but we can't request its ID.
"""
class CustomGlobalIDType(BaseGlobalIDType):
graphene_type = Int
@classmethod
def resolve_global_id(cls, info, global_id):
_type = info.return_type.graphene_type._meta.name
return _type, global_id
class CustomNode(Node):
class Meta:
global_id_type = CustomGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
self.graphql_schema = self.schema.graphql_schema
query = """query {
user(id: 2) {
name
}
}"""
result = graphql_sync(self.graphql_schema, query)
assert not result.errors
assert result.data["user"]["name"] == self.user_list[1]["name"]
query = """query {
user(id: 2) {
id
name
}
}"""
result = graphql_sync(self.graphql_schema, query)
assert result.errors is not None
assert len(result.errors) == 1
assert result.errors[0].path == ["user", "id"]
def test_must_define_resolve_global_id(self):
"""
Test that if the `resolve_global_id` method is not defined, we can't query the object by ID.
"""
class CustomGlobalIDType(BaseGlobalIDType):
graphene_type = Int
@classmethod
def to_global_id(cls, _type, _id):
return _id
class CustomNode(Node):
class Meta:
global_id_type = CustomGlobalIDType
class User(ObjectType):
class Meta:
interfaces = [CustomNode]
name = String()
@classmethod
def get_node(cls, _type, _id):
return self.users[_id]
class RootQuery(ObjectType):
user = CustomNode.Field(User)
self.schema = Schema(query=RootQuery, types=[User])
self.graphql_schema = self.schema.graphql_schema
query = """query {
user(id: 2) {
id
name
}
}"""
result = graphql_sync(self.graphql_schema, query)
assert result.errors is not None
assert len(result.errors) == 1
assert result.errors[0].path == ["user"]
| TestIncompleteCustomGlobalID |
python | jazzband__django-waffle | waffle/mixins.py | {
"start": 1377,
"end": 1820
} | class ____(BaseWaffleMixin):
"""
Checks that as switch is active, or 404. Operates like the FBV decorator
waffle_switch.
"""
waffle_switch: str | None = None
def dispatch(self, request, *args, **kwargs):
active = self.validate_waffle(self.waffle_switch, switch_is_active)
if not active:
return self.invalid_waffle()
return super().dispatch(request, *args, **kwargs)
| WaffleSwitchMixin |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | {
"start": 15506,
"end": 17110
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = XLMRobertaXLCrossAttention if is_cross_attention else XLMRobertaXLSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = XLMRobertaXLSelfOutput(config)
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
intermediate = self.self_attn_layer_norm(hidden_states)
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
intermediate,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| XLMRobertaXLAttention |
python | kamyu104__LeetCode-Solutions | Python/reverse-odd-levels-of-binary-tree.py | {
"start": 165,
"end": 872
} | class ____(object):
def reverseOddLevels(self, root):
"""
:type root: Optional[TreeNode]
:rtype: Optional[TreeNode]
"""
q = [root]
parity = 0
while q:
if parity:
left, right = 0, len(q)-1
while left < right:
q[left].val, q[right].val = q[right].val, q[left].val
left += 1
right -= 1
if not q[0].left:
break
new_q = []
for node in q:
new_q.append(node.left)
new_q.append(node.right)
q = new_q
parity ^= 1
return root
| Solution |
python | pytorch__pytorch | .github/scripts/test_trymerge.py | {
"start": 23111,
"end": 36148
} | class ____(TestCase):
def test_get_classifications(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 109584)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
self.assertTrue(
checks[
"pull / linux-focal-py3.11-clang10 / test (dynamo, 1, 2, linux.2xlarge)"
].classification
== "BROKEN_TRUNK"
)
self.assertTrue(
checks[
"trunk / win-vs2019-cpu-py3 / test (default, 2, 3, windows.4xlarge.nonephemeral)"
].classification
== "FLAKY"
)
self.assertTrue(
checks[
"pull / linux-jammy-py3.8-gcc11 / test (distributed, 1, 2, linux.2xlarge)"
].classification
== "FLAKY"
)
self.assertTrue(
checks[
"pull / linux-focal-cuda11.8-py3.10-gcc9 / test (distributed, 1, 3, linux.8xlarge.nvidia.gpu)"
].classification
== "FLAKY"
)
# Set the threshold larger or equal to the number of ok failures
pending, failed, ignorable = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=6
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 4)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 2)
# Not set any threshold, defaults to -1 to ignore all flaky and broken trunk failures
pending, failed, ignorable = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 4)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 2)
# Set the threshold lower than the number of ok failures
pending, failed, ignorable = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=1
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 6)
self.assertTrue(len(ignorable["FLAKY"]) == 4)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 2)
# Set the threshold to 0 like when ignore_flaky_failures is on
pending, failed, ignorable = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=1
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 6)
self.assertTrue(len(ignorable["FLAKY"]) == 4)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 2)
def test_get_classifications_flaky_fullname(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 110362)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, ignorable = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 1)
def test_get_classifications_invalid_cancel(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 110367)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, ignorable = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 0)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 0)
self.assertTrue(len(ignorable["UNSTABLE"]) == 3)
def test_get_classifications_similar_failures(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 109750)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, ignorable = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 1)
def test_get_classifications_unstable(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 104312)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
workflow_name = "linux-bionic-cuda12.1-py3.10-gcc9-bazel-test"
job_name = "build-and-test (default, 1, 1, linux.4xlarge.nvidia.gpu, unstable)"
self.assertTrue(
checks[f"pull / {workflow_name} / {job_name}"].classification == "UNSTABLE"
)
pending, failed, ignorable = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=1
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["UNSTABLE"]) == 1)
# Add another test case where there is no unstable keyword in the job name, but
# the job has already been marked as unstable
pr = GitHubPR("pytorch", "executorch", 3318)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
print(checks)
workflow_name = "test-llama-app"
job_name = "mobile-job (android)"
self.assertTrue(
checks[f"Android / {workflow_name} / {job_name}"].classification
== "UNSTABLE"
)
pending, failed, ignorable = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=1
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["UNSTABLE"]) == 1)
def test_get_classifications_broken_trunk(self, *args: Any) -> None:
# The mock merge base is the actual value returned by gh_fetch_merge_base
test_cases = [
{
# This PR had one broken trunk failure but it was run on a different shard
# than the one on the base commit. This should still count as broken trunk
"pr_num": 104214,
"related_failure_count": 0,
"flaky_or_broken_trunk": 1,
},
{
# This PR had one broken trunk failure and it used ghstack
"pr_num": 105145,
"related_failure_count": 0,
"flaky_or_broken_trunk": 1,
},
{
# The failure on the merge base was retried successfully and
# its conclusion changed from failure to success. We want to
# keep the failure record from the merge base so that it can
# be used to detect broken trunk
"pr_num": 107160,
"related_failure_count": 0,
"flaky_or_broken_trunk": 1,
},
{
# This PR used Dr.CI broken trunk classification
"pr_num": 111253,
"related_failure_count": 1,
"flaky_or_broken_trunk": 1,
},
]
for case in test_cases:
pr_num = case["pr_num"]
related_failure_count = case["related_failure_count"]
flaky_or_broken_trunk = case["flaky_or_broken_trunk"]
pr = GitHubPR("pytorch", "pytorch", pr_num)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, _ = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == related_failure_count)
# When the ok_failed_checks_threshold is set to 0, the broken trunk failure
# won't be ignored
pending, failed, _ = categorize_checks(
checks, list(checks.keys()), ok_failed_checks_threshold=0
)
self.assertTrue(len(pending) == 0)
self.assertTrue(
len(failed) == flaky_or_broken_trunk + related_failure_count
)
def test_ignore_current(self, *args: Any) -> None:
# Test various interactions of the failure classifier to ensure that ignore
# current checks takes place after other classifications: flaky, unstable,
# or broken trunk. Only actual new failures should be kept in the list of
# ignore current checks to use to record force merge with actual failures
flaky = "pull / linux-focal-cuda11.8-py3.10-gcc9 / test (distributed, 1, 3, linux.8xlarge.nvidia.gpu)"
broken_trunk = (
"pull / linux-focal-py3.11-clang10 / test (dynamo, 1, 2, linux.2xlarge)"
)
pr = GitHubPR("pytorch", "pytorch", 109584)
checks = pr.get_checkrun_conclusions()
# Known flaky failure takes precedence over ignore current (need to set the
# merge base here to get the results from Dr. CI, and that categorize the
# broken trunk failure too
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[broken_trunk, flaky],
)
self.assertTrue(checks[flaky].classification == "FLAKY")
self.assertTrue(checks[broken_trunk].classification == "BROKEN_TRUNK")
_, failed, ignorable = categorize_checks(checks, list(checks.keys()))
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["IGNORE_CURRENT_CHECK"]) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 4)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 2)
def test_get_classifications_wrong_workflow_name(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 123104)
checks = pr.get_checkrun_conclusions()
check_name = "linux-binary-conda / conda-py3_8-cuda11_8-build / build"
check_name_workflow_path = ".github/workflows/generated-linux-binary-conda-nightly.yml / conda-py3_8-cuda11_8-build / build"
# Mock a check where the workflow name uses the full path
checks[check_name_workflow_path] = JobCheckState(
check_name_workflow_path,
checks[check_name].url,
checks[check_name].status,
checks[check_name].classification,
checks[check_name].job_id,
checks[check_name].title,
checks[check_name].summary,
)
del checks[check_name]
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, ignorable = categorize_checks(
checks,
list(checks.keys()),
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 1)
self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 0)
def test_ignore_failures_older_run_same_workflow(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 129013)
checks = pr.get_checkrun_conclusions()
checks = get_classifications(
pr.pr_num,
pr.project,
checks,
[],
)
pending, failed, ignorable = categorize_checks(
checks,
list(checks.keys()),
)
self.assertTrue(len(pending) == 0)
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["FLAKY"]) == 2)
self.assertTrue(len(ignorable["UNSTABLE"]) == 13)
@mock.patch("trymerge.read_merge_rules", side_effect=xla_merge_rules)
def test_dont_ignore_flaky_failures(self, *args: Any) -> None:
"""
Regression test for https://github.com/pytorch/test-infra/issues/4126
"""
pr = GitHubPR("pytorch", "pytorch", 105312)
repo = DummyGitRepo()
# Check that failure is classified as flaky but still raises exception
with warnings.catch_warnings(record=True) as w, self.assertRaises(RuntimeError):
find_matching_merge_rule(pr, repo)
self.assertEqual(len(w), 1)
self.assertIn(
"1 checks failed but were likely due flakiness or broken trunk",
str(w[0].message),
)
@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")
@mock.patch("trymerge.get_drci_classifications", return_value={})
| TestBypassFailures |
python | ray-project__ray | rllib/algorithms/dreamerv3/dreamerv3.py | {
"start": 2018,
"end": 20545
class DreamerV3Config(AlgorithmConfig):
"""Defines a configuration class from which a DreamerV3 can be built.
.. testcode::
from ray.rllib.algorithms.dreamerv3 import DreamerV3Config
config = (
DreamerV3Config()
.environment("CartPole-v1")
.training(
model_size="XS",
training_ratio=1,
# TODO
model={
"batch_size_B": 1,
"batch_length_T": 1,
"horizon_H": 1,
"gamma": 0.997,
"model_size": "XS",
},
)
)
config = config.learners(num_learners=0)
# Build a Algorithm object from the config and run 1 training iteration.
algo = config.build()
# algo.train()
del algo
.. testoutput::
:hide:
...
"""
    def __init__(self, algo_class=None):
        """Initializes a DreamerV3Config instance.

        Args:
            algo_class: The Algorithm class to build; defaults to DreamerV3.
        """
        super().__init__(algo_class=algo_class or DreamerV3)
        # fmt: off
        # __sphinx_doc_begin__
        # DreamerV3 specific settings:
        self.model_size = "XS"
        self.training_ratio = 1024
        # Replay buffer: type and capacity only (see `training()` docs).
        self.replay_buffer_config = {
            "type": "EpisodeReplayBuffer",
            "capacity": int(1e6),
        }
        # Per-component learning rates.
        self.world_model_lr = 1e-4
        self.actor_lr = 3e-5
        self.critic_lr = 3e-5
        # Replay-sample dimensions: B rows of length T per train batch.
        self.batch_size_B = 16
        self.batch_length_T = 64
        # Dream-rollout horizon for actor/critic training.
        self.horizon_H = 15
        self.gae_lambda = 0.95  # [1] eq. 7.
        self.entropy_scale = 3e-4  # [1] eq. 11.
        self.return_normalization_decay = 0.99  # [1] eq. 11 and 12.
        self.train_critic = True
        self.train_actor = True
        self.intrinsic_rewards_scale = 0.1
        # Gradient clipping (by global norm) per component.
        self.world_model_grad_clip_by_global_norm = 1000.0
        self.critic_grad_clip_by_global_norm = 100.0
        self.actor_grad_clip_by_global_norm = 100.0
        # "auto": symlog observations only for non-image spaces.
        self.symlog_obs = "auto"
        self.use_float16 = False
        # Curiosity is not fully supported yet (see `training()`).
        self.use_curiosity = False
        # Reporting.
        # DreamerV3 is super sample efficient and only needs very few episodes
        # (normally) to learn. Leaving this at its default value would gravely
        # underestimate the learning performance over the course of an experiment.
        self.metrics_num_episodes_for_smoothing = 1
        self.report_individual_batch_item_stats = False
        self.report_dream_data = False
        self.report_images_and_videos = False
        # Override some of AlgorithmConfig's default values with DreamerV3-specific
        # values.
        self.lr = None
        self.gamma = 0.997  # [1] eq. 7.
        # Do not use! Set `batch_size_B` and `batch_length_T` instead.
        self.train_batch_size = None
        self.num_env_runners = 0
        self.rollout_fragment_length = 1
        # Dreamer only runs on the new API stack.
        self.enable_rl_module_and_learner = True
        self.enable_env_runner_and_connector_v2 = True
        # TODO (sven): DreamerV3 still uses its own EnvRunner class. This env-runner
        # does not use connectors. We therefore should not attempt to merge/broadcast
        # the connector states between EnvRunners (if >0). Note that this is only
        # relevant if num_env_runners > 0, which is normally not the case when using
        # this algo.
        self.use_worker_filter_stats = False
        # __sphinx_doc_end__
        # fmt: on
    @override(AlgorithmConfig)
    def build_env_to_module_connector(self, env, spaces, device):
        """Builds the env-to-module connector, prepending the "is_first" piece.

        Returns:
            The connector pipeline built by the base class, with an
            `AddIsFirstsToBatch` piece inserted before
            `AddStatesFromEpisodesToBatch` (when default pieces are enabled).
        """
        connector = super().build_env_to_module_connector(env, spaces, device)
        # Prepend the "is_first" connector such that the RSSM knows, when to insert
        # its (learned) internal state into the batch.
        # We have to do this before the `AddStatesFromEpisodesToBatch` piece
        # such that the column is properly batched/time-ranked.
        if self.add_default_connectors_to_learner_pipeline:
            connector.insert_before(
                AddStatesFromEpisodesToBatch,
                AddIsFirstsToBatch(),
            )
        return connector
@property
def batch_size_B_per_learner(self):
"""Returns the batch_size_B per Learner worker.
Needed by some of the DreamerV3 loss math."""
return self.batch_size_B // (self.num_learners or 1)
@override(AlgorithmConfig)
def training(
self,
*,
model_size: Optional[str] = NotProvided,
training_ratio: Optional[float] = NotProvided,
batch_size_B: Optional[int] = NotProvided,
batch_length_T: Optional[int] = NotProvided,
horizon_H: Optional[int] = NotProvided,
gae_lambda: Optional[float] = NotProvided,
entropy_scale: Optional[float] = NotProvided,
return_normalization_decay: Optional[float] = NotProvided,
train_critic: Optional[bool] = NotProvided,
train_actor: Optional[bool] = NotProvided,
intrinsic_rewards_scale: Optional[float] = NotProvided,
world_model_lr: Optional[LearningRateOrSchedule] = NotProvided,
actor_lr: Optional[LearningRateOrSchedule] = NotProvided,
critic_lr: Optional[LearningRateOrSchedule] = NotProvided,
world_model_grad_clip_by_global_norm: Optional[float] = NotProvided,
critic_grad_clip_by_global_norm: Optional[float] = NotProvided,
actor_grad_clip_by_global_norm: Optional[float] = NotProvided,
symlog_obs: Optional[Union[bool, str]] = NotProvided,
use_float16: Optional[bool] = NotProvided,
replay_buffer_config: Optional[dict] = NotProvided,
use_curiosity: Optional[bool] = NotProvided,
**kwargs,
) -> Self:
"""Sets the training related configuration.
Args:
model_size: The main switch for adjusting the overall model size. See [1]
(table B) for more information on the effects of this setting on the
model architecture.
Supported values are "XS", "S", "M", "L", "XL" (as per the paper), as
well as, "nano", "micro", "mini", and "XXS" (for RLlib's
implementation). See ray.rllib.algorithms.dreamerv3.utils.
__init__.py for the details on what exactly each size does to the layer
sizes, number of layers, etc..
training_ratio: The ratio of total steps trained (sum of the sizes of all
batches ever sampled from the replay buffer) over the total env steps
taken (in the actual environment, not the dreamed one). For example,
if the training_ratio is 1024 and the batch size is 1024, we would take
1 env step for every training update: 1024 / 1. If the training ratio
is 512 and the batch size is 1024, we would take 2 env steps and then
perform a single training update (on a 1024 batch): 1024 / 2.
batch_size_B: The batch size (B) interpreted as number of rows (each of
length `batch_length_T`) to sample from the replay buffer in each
iteration.
batch_length_T: The batch length (T) interpreted as the length of each row
sampled from the replay buffer in each iteration. Note that
`batch_size_B` rows will be sampled in each iteration. Rows normally
contain consecutive data (consecutive timesteps from the same episode),
but there might be episode boundaries in a row as well.
horizon_H: The horizon (in timesteps) used to create dreamed data from the
world model, which in turn is used to train/update both actor- and
critic networks.
gae_lambda: The lambda parameter used for computing the GAE-style
value targets for the actor- and critic losses.
entropy_scale: The factor with which to multiply the entropy loss term
inside the actor loss.
return_normalization_decay: The decay value to use when computing the
running EMA values for return normalization (used in the actor loss).
train_critic: Whether to train the critic network. If False, `train_actor`
must also be False (cannot train actor w/o training the critic).
train_actor: Whether to train the actor network. If True, `train_critic`
must also be True (cannot train actor w/o training the critic).
intrinsic_rewards_scale: The factor to multiply intrinsic rewards with
before adding them to the extrinsic (environment) rewards.
world_model_lr: The learning rate or schedule for the world model optimizer.
actor_lr: The learning rate or schedule for the actor optimizer.
critic_lr: The learning rate or schedule for the critic optimizer.
world_model_grad_clip_by_global_norm: World model grad clipping value
(by global norm).
critic_grad_clip_by_global_norm: Critic grad clipping value
(by global norm).
actor_grad_clip_by_global_norm: Actor grad clipping value (by global norm).
symlog_obs: Whether to symlog observations or not. If set to "auto"
(default), will check for the environment's observation space and then
only symlog if not an image space.
use_float16: Whether to train with mixed float16 precision. In this mode,
model parameters are stored as float32, but all computations are
performed in float16 space (except for losses and distribution params
and outputs).
replay_buffer_config: Replay buffer config.
Only serves in DreamerV3 to set the capacity of the replay buffer.
Note though that in the paper ([1]) a size of 1M is used for all
benchmarks and there doesn't seem to be a good reason to change this
parameter.
Examples:
{
"type": "EpisodeReplayBuffer",
"capacity": 100000,
}
Returns:
This updated AlgorithmConfig object.
"""
# Not fully supported/tested yet.
if use_curiosity is not NotProvided:
raise ValueError(
"`DreamerV3Config.curiosity` is not fully supported and tested yet! "
"It thus remains disabled for now."
)
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if model_size is not NotProvided:
self.model_size = model_size
if training_ratio is not NotProvided:
self.training_ratio = training_ratio
if batch_size_B is not NotProvided:
self.batch_size_B = batch_size_B
if batch_length_T is not NotProvided:
self.batch_length_T = batch_length_T
if horizon_H is not NotProvided:
self.horizon_H = horizon_H
if gae_lambda is not NotProvided:
self.gae_lambda = gae_lambda
if entropy_scale is not NotProvided:
self.entropy_scale = entropy_scale
if return_normalization_decay is not NotProvided:
self.return_normalization_decay = return_normalization_decay
if train_critic is not NotProvided:
self.train_critic = train_critic
if train_actor is not NotProvided:
self.train_actor = train_actor
if intrinsic_rewards_scale is not NotProvided:
self.intrinsic_rewards_scale = intrinsic_rewards_scale
if world_model_lr is not NotProvided:
self.world_model_lr = world_model_lr
if actor_lr is not NotProvided:
self.actor_lr = actor_lr
if critic_lr is not NotProvided:
self.critic_lr = critic_lr
if world_model_grad_clip_by_global_norm is not NotProvided:
self.world_model_grad_clip_by_global_norm = (
world_model_grad_clip_by_global_norm
)
if critic_grad_clip_by_global_norm is not NotProvided:
self.critic_grad_clip_by_global_norm = critic_grad_clip_by_global_norm
if actor_grad_clip_by_global_norm is not NotProvided:
self.actor_grad_clip_by_global_norm = actor_grad_clip_by_global_norm
if symlog_obs is not NotProvided:
self.symlog_obs = symlog_obs
if use_float16 is not NotProvided:
self.use_float16 = use_float16
if replay_buffer_config is not NotProvided:
# Override entire `replay_buffer_config` if `type` key changes.
# Update, if `type` key remains the same or is not specified.
new_replay_buffer_config = deep_update(
{"replay_buffer_config": self.replay_buffer_config},
{"replay_buffer_config": replay_buffer_config},
False,
["replay_buffer_config"],
["replay_buffer_config"],
)
self.replay_buffer_config = new_replay_buffer_config["replay_buffer_config"]
return self
@override(AlgorithmConfig)
def reporting(
self,
*,
report_individual_batch_item_stats: Optional[bool] = NotProvided,
report_dream_data: Optional[bool] = NotProvided,
report_images_and_videos: Optional[bool] = NotProvided,
**kwargs,
):
"""Sets the reporting related configuration.
Args:
report_individual_batch_item_stats: Whether to include loss and other stats
per individual timestep inside the training batch in the result dict
returned by `training_step()`. If True, besides the `CRITIC_L_total`,
the individual critic loss values per batch row and time axis step
in the train batch (CRITIC_L_total_B_T) will also be part of the
results.
report_dream_data: Whether to include the dreamed trajectory data in the
result dict returned by `training_step()`. If True, however, will
slice each reported item in the dream data down to the shape.
(H, B, t=0, ...), where H is the horizon and B is the batch size. The
original time axis will only be represented by the first timestep
to not make this data too large to handle.
report_images_and_videos: Whether to include any image/video data in the
result dict returned by `training_step()`.
**kwargs:
Returns:
This updated AlgorithmConfig object.
"""
super().reporting(**kwargs)
if report_individual_batch_item_stats is not NotProvided:
self.report_individual_batch_item_stats = report_individual_batch_item_stats
if report_dream_data is not NotProvided:
self.report_dream_data = report_dream_data
if report_images_and_videos is not NotProvided:
self.report_images_and_videos = report_images_and_videos
return self
    @override(AlgorithmConfig)
    def validate(self) -> None:
        """Validates DreamerV3-specific settings (raises via `_value_error`)."""
        # Call the super class' validation method first.
        super().validate()
        # Make sure, users are not using DreamerV3 yet for multi-agent:
        if self.is_multi_agent:
            self._value_error("DreamerV3 does NOT support multi-agent setups yet!")
        # Make sure, we are configured for the new API stack.
        if not self.enable_rl_module_and_learner:
            self._value_error(
                "DreamerV3 must be run with `config.api_stack("
                "enable_rl_module_and_learner=True)`!"
            )
        # If run on several Learners, the provided batch_size_B must be a multiple
        # of `num_learners`.
        if self.num_learners > 1 and (self.batch_size_B % self.num_learners != 0):
            self._value_error(
                f"Your `batch_size_B` ({self.batch_size_B}) must be a multiple of "
                f"`num_learners` ({self.num_learners}) in order for "
                "DreamerV3 to be able to split batches evenly across your Learner "
                "processes."
            )
        # Cannot train actor w/o critic.
        if self.train_actor and not self.train_critic:
            self._value_error(
                "Cannot train actor network (`train_actor=True`) w/o training critic! "
                "Make sure you either set `train_critic=True` or `train_actor=False`."
            )
        # Use DreamerV3 specific batch size settings.
        if self.train_batch_size is not None:
            self._value_error(
                "`train_batch_size` should NOT be set! Use `batch_size_B` and "
                "`batch_length_T` instead."
            )
        # Must be run with `EpisodeReplayBuffer` type.
        if self.replay_buffer_config.get("type") != "EpisodeReplayBuffer":
            self._value_error(
                "DreamerV3 must be run with the `EpisodeReplayBuffer` type! None "
                "other supported."
            )
@override(AlgorithmConfig)
def get_default_learner_class(self):
if self.framework_str == "torch":
from ray.rllib.algorithms.dreamerv3.torch.dreamerv3_torch_learner import (
DreamerV3TorchLearner,
)
return DreamerV3TorchLearner
else:
raise ValueError(f"The framework {self.framework_str} is not supported.")
@override(AlgorithmConfig)
def get_default_rl_module_spec(self) -> RLModuleSpec:
if self.framework_str == "torch":
from ray.rllib.algorithms.dreamerv3.torch.dreamerv3_torch_rl_module import (
DreamerV3TorchRLModule as module,
)
else:
raise ValueError(f"The framework {self.framework_str} is not supported.")
return RLModuleSpec(module_class=module, catalog_class=DreamerV3Catalog)
@property
@override(AlgorithmConfig)
def _model_config_auto_includes(self) -> Dict[str, Any]:
return super()._model_config_auto_includes | {
"gamma": self.gamma,
"horizon_H": self.horizon_H,
"model_size": self.model_size,
"symlog_obs": self.symlog_obs,
"use_float16": self.use_float16,
"batch_length_T": self.batch_length_T,
}
| DreamerV3Config |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/customizing-existing-component/global/4-component.py | {
"start": 64,
"end": 178
class CustomSlingReplicationComponent(SlingReplicationCollectionComponent):
    """Customized Sling component.

    Subclass hook over `SlingReplicationCollectionComponent`; the placeholder
    class name is replaced with a real identifier so the class is importable.
    """
| CustomSlingReplicationComponent |
python | encode__starlette | tests/test_applications.py | {
"start": 2800,
"end": 18246
class CustomWSException(Exception):
    """App-specific exception raised inside a WebSocket endpoint.

    The placeholder class name is replaced with `CustomWSException`, the name
    the rest of the file raises (`websocket_raise_custom`) and registers a
    handler for (`exception_handlers`).
    """
async def websocket_raise_custom(websocket: WebSocket) -> None:
    """Endpoint that accepts the handshake, then raises CustomWSException."""
    await websocket.accept()
    raise CustomWSException()
def custom_ws_exception_handler(websocket: WebSocket, exc: CustomWSException) -> None:
    """Close the socket with 1013 (try again later) on CustomWSException.

    This handler is synchronous, so `anyio.from_thread.run` is used to invoke
    the async `websocket.close` on the event-loop side.
    """
    anyio.from_thread.run(websocket.close, status.WS_1013_TRY_AGAIN_LATER)
# Sub-router mounted under "/users".
users = Router(
    routes=[
        Route("/", endpoint=all_users_page),
        Route("/{username}", endpoint=user_page),
    ]
)
# Sub-router served for "{subdomain}.example.org" hosts.
subdomain = Router(
    routes=[
        Route("/", custom_subdomain),
    ]
)
# Handlers keyed by status code or exception class (incl. the custom WS one).
exception_handlers = {
    500: error_500,
    405: method_not_allowed,
    HTTPException: http_exception,
    CustomWSException: custom_ws_exception_handler,
}
# Reject requests whose Host header is neither "testserver" nor *.example.org.
middleware = [Middleware(TrustedHostMiddleware, allowed_hosts=["testserver", "*.example.org"])]
# Module-level application exercised by most tests in this file.
app = Starlette(
    routes=[
        Route("/func", endpoint=func_homepage),
        Route("/async", endpoint=async_homepage),
        Route("/class", endpoint=Homepage),
        Route("/500", endpoint=runtime_error),
        WebSocketRoute("/ws", endpoint=websocket_endpoint),
        WebSocketRoute("/ws-raise-websocket", endpoint=websocket_raise_websocket_exception),
        WebSocketRoute("/ws-raise-http", endpoint=websocket_raise_http_exception),
        WebSocketRoute("/ws-raise-custom", endpoint=websocket_raise_custom),
        Mount("/users", app=users),
        Host("{subdomain}.example.org", app=subdomain),
    ],
    exception_handlers=exception_handlers,  # type: ignore
    middleware=middleware,
)
@pytest.fixture
def client(test_client_factory: TestClientFactory) -> Generator[TestClient, None, None]:
    """Yield a TestClient bound to the module-level `app`."""
    with test_client_factory(app) as client:
        yield client
def test_url_path_for() -> None:
    """A route name registered on the app reverses to its URL path."""
    assert app.url_path_for("func_homepage") == "/func"
def test_func_route(client: TestClient) -> None:
    """GET returns the greeting; HEAD returns the same status with no body."""
    get_response = client.get("/func")
    assert (get_response.status_code, get_response.text) == (200, "Hello, world!")
    head_response = client.head("/func")
    assert (head_response.status_code, head_response.text) == (200, "")
def test_async_route(client: TestClient) -> None:
    """GET on an async function endpoint returns the plain-text greeting."""
    response = client.get("/async")
    assert (response.status_code, response.text) == (200, "Hello, world!")
def test_class_route(client: TestClient) -> None:
    """GET on a class-based endpoint returns the plain-text greeting."""
    response = client.get("/class")
    assert (response.status_code, response.text) == (200, "Hello, world!")
def test_mounted_route(client: TestClient) -> None:
    """The router mounted at /users serves its index route."""
    response = client.get("/users/")
    assert (response.status_code, response.text) == (200, "Hello, everyone!")
def test_mounted_route_path_params(client: TestClient) -> None:
    """Path parameters are extracted inside a mounted router."""
    response = client.get("/users/tomchristie")
    assert (response.status_code, response.text) == (200, "Hello, tomchristie!")
def test_subdomain_route(test_client_factory: TestClientFactory) -> None:
    """A Host route matches *.example.org and exposes the subdomain param."""
    subdomain_client = test_client_factory(app, base_url="https://foo.example.org/")
    response = subdomain_client.get("/")
    assert (response.status_code, response.text) == (200, "Subdomain: foo")
def test_websocket_route(client: TestClient) -> None:
    """The websocket endpoint sends a greeting after the handshake."""
    with client.websocket_connect("/ws") as session:
        assert session.receive_text() == "Hello, world!"
def test_404(client: TestClient) -> None:
    """An unknown path returns Starlette's default JSON 404 body.

    Renamed from ``test_400``: the request exercises the 404 path, not 400.
    """
    response = client.get("/404")
    assert response.status_code == 404
    assert response.json() == {"detail": "Not Found"}
def test_405(client: TestClient) -> None:
    """POST to GET-only endpoints triggers the custom 405 handler."""
    for path in ("/func", "/class"):
        response = client.post(path)
        assert response.status_code == 405
        assert response.json() == {"detail": "Custom message"}
def test_500(test_client_factory: TestClientFactory) -> None:
    """Unhandled errors reach the custom 500 handler when the client does not
    re-raise server exceptions."""
    non_raising_client = test_client_factory(app, raise_server_exceptions=False)
    response = non_raising_client.get("/500")
    assert (response.status_code, response.json()) == (500, {"detail": "Server Error"})
def test_websocket_raise_websocket_exception(client: TestClient) -> None:
    """Raising WebSocketException closes the socket with the given code."""
    expected_close = {
        "type": "websocket.close",
        "code": status.WS_1003_UNSUPPORTED_DATA,
        "reason": "",
    }
    with client.websocket_connect("/ws-raise-websocket") as session:
        assert session.receive() == expected_close
def test_websocket_raise_http_exception(client: TestClient) -> None:
    """Raising HTTPException during a WS handshake yields a denial response."""
    with pytest.raises(WebSocketDenialResponse) as exc:
        with client.websocket_connect("/ws-raise-http"):
            pass  # pragma: no cover
    denial = exc.value
    assert denial.status_code == 401
    assert denial.content == b'{"detail":"Unauthorized"}'
def test_websocket_raise_custom_exception(client: TestClient) -> None:
    """A user-registered handler can close the socket for custom exceptions."""
    expected_close = {
        "type": "websocket.close",
        "code": status.WS_1013_TRY_AGAIN_LATER,
        "reason": "",
    }
    with client.websocket_connect("/ws-raise-custom") as session:
        assert session.receive() == expected_close
def test_middleware(test_client_factory: TestClientFactory) -> None:
    """TrustedHostMiddleware rejects requests with a disallowed Host header."""
    bad_host_client = test_client_factory(app, base_url="http://incorrecthost")
    response = bad_host_client.get("/func")
    assert (response.status_code, response.text) == (400, "Invalid host header")
def test_routes() -> None:
    """The app's `.routes` property mirrors the route table it was built with."""
    assert app.routes == [
        Route("/func", endpoint=func_homepage, methods=["GET"]),
        Route("/async", endpoint=async_homepage, methods=["GET"]),
        Route("/class", endpoint=Homepage),
        Route("/500", endpoint=runtime_error, methods=["GET"]),
        WebSocketRoute("/ws", endpoint=websocket_endpoint),
        WebSocketRoute("/ws-raise-websocket", endpoint=websocket_raise_websocket_exception),
        WebSocketRoute("/ws-raise-http", endpoint=websocket_raise_http_exception),
        WebSocketRoute("/ws-raise-custom", endpoint=websocket_raise_custom),
        Mount(
            "/users",
            app=Router(
                routes=[
                    Route("/", endpoint=all_users_page),
                    Route("/{username}", endpoint=user_page),
                ]
            ),
        ),
        Host(
            "{subdomain}.example.org",
            app=Router(routes=[Route("/", endpoint=custom_subdomain)]),
        ),
    ]
def test_app_mount(tmpdir: Path, test_client_factory: TestClientFactory) -> None:
    """A mounted StaticFiles app serves files and rejects non-GET/HEAD methods."""
    file_path = os.path.join(tmpdir, "example.txt")
    with open(file_path, "w") as handle:
        handle.write("<file content>")
    static_app = Starlette(
        routes=[
            Mount("/static", StaticFiles(directory=tmpdir)),
        ]
    )
    client = test_client_factory(static_app)
    ok_response = client.get("/static/example.txt")
    assert (ok_response.status_code, ok_response.text) == (200, "<file content>")
    rejected = client.post("/static/example.txt")
    assert (rejected.status_code, rejected.text) == (405, "Method Not Allowed")
def test_app_debug(test_client_factory: TestClientFactory) -> None:
    """In debug mode the 500 response body mentions the raised error type."""
    async def homepage(request: Request) -> None:
        raise RuntimeError()

    debug_app = Starlette(routes=[Route("/", homepage)])
    debug_app.debug = True
    client = test_client_factory(debug_app, raise_server_exceptions=False)
    response = client.get("/")
    assert response.status_code == 500
    assert "RuntimeError" in response.text
    assert debug_app.debug
def test_app_add_route(test_client_factory: TestClientFactory) -> None:
    """A Starlette app built with a single Route serves that endpoint."""
    async def homepage(request: Request) -> PlainTextResponse:
        return PlainTextResponse("Hello, World!")

    single_route_app = Starlette(routes=[Route("/", endpoint=homepage)])
    client = test_client_factory(single_route_app)
    response = client.get("/")
    assert (response.status_code, response.text) == (200, "Hello, World!")
def test_app_add_websocket_route(test_client_factory: TestClientFactory) -> None:
    """A Starlette app built with a single WebSocketRoute serves that endpoint."""
    async def websocket_endpoint(session: WebSocket) -> None:
        await session.accept()
        await session.send_text("Hello, world!")
        await session.close()

    ws_app = Starlette(routes=[WebSocketRoute("/ws", endpoint=websocket_endpoint)])
    client = test_client_factory(ws_app)
    with client.websocket_connect("/ws") as session:
        assert session.receive_text() == "Hello, world!"
def test_app_add_event_handler(test_client_factory: TestClientFactory) -> None:
    """Deprecated on_startup/on_shutdown hooks fire at client enter/exit."""
    startup_complete = False
    cleanup_complete = False
    def run_startup() -> None:
        nonlocal startup_complete
        startup_complete = True
    def run_cleanup() -> None:
        nonlocal cleanup_complete
        cleanup_complete = True
    # The on_startup/on_shutdown constructor params emit a DeprecationWarning.
    with pytest.deprecated_call(match="The on_startup and on_shutdown parameters are deprecated"):
        app = Starlette(
            on_startup=[run_startup],
            on_shutdown=[run_cleanup],
        )
    assert not startup_complete
    assert not cleanup_complete
    with test_client_factory(app):
        # Startup ran on entering the client context; shutdown not yet.
        assert startup_complete
        assert not cleanup_complete
    assert startup_complete
    assert cleanup_complete
def test_app_async_cm_lifespan(test_client_factory: TestClientFactory) -> None:
    """An @asynccontextmanager lifespan runs startup/shutdown at client enter/exit."""
    startup_complete = False
    cleanup_complete = False
    @asynccontextmanager
    async def lifespan(app: ASGIApp) -> AsyncGenerator[None, None]:
        nonlocal startup_complete, cleanup_complete
        startup_complete = True
        yield
        cleanup_complete = True
    app = Starlette(lifespan=lifespan)
    assert not startup_complete
    assert not cleanup_complete
    with test_client_factory(app):
        # Code before the lifespan's `yield` ran; code after it not yet.
        assert startup_complete
        assert not cleanup_complete
    assert startup_complete
    assert cleanup_complete
# Marker that silences the DeprecationWarning emitted from starlette.routing
# for plain (async) generator-function lifespans, so legacy-style lifespans
# can still be exercised by the decorated tests.
deprecated_lifespan = pytest.mark.filterwarnings(
    r"ignore"
    r":(async )?generator function lifespans are deprecated, use an "
    r"@contextlib\.asynccontextmanager function instead"
    r":DeprecationWarning"
    r":starlette.routing"
)
@deprecated_lifespan
def test_app_async_gen_lifespan(test_client_factory: TestClientFactory) -> None:
    """A bare async-generator lifespan (deprecated form) still works."""
    startup_complete = False
    cleanup_complete = False
    async def lifespan(app: ASGIApp) -> AsyncGenerator[None, None]:
        nonlocal startup_complete, cleanup_complete
        startup_complete = True
        yield
        cleanup_complete = True
    app = Starlette(lifespan=lifespan)  # type: ignore
    assert not startup_complete
    assert not cleanup_complete
    with test_client_factory(app):
        # Code before the lifespan's `yield` ran; code after it not yet.
        assert startup_complete
        assert not cleanup_complete
    assert startup_complete
    assert cleanup_complete
@deprecated_lifespan
def test_app_sync_gen_lifespan(test_client_factory: TestClientFactory) -> None:
startup_complete = False
cleanup_complete = False
def lifespan(app: ASGIApp) -> Generator[None, None, None]:
nonlocal startup_complete, cleanup_complete
startup_complete = True
yield
cleanup_complete = True
app = Starlette(lifespan=lifespan) # type: ignore
assert not startup_complete
assert not cleanup_complete
with test_client_factory(app):
assert startup_complete
assert not cleanup_complete
assert startup_complete
assert cleanup_complete
def test_decorator_deprecations() -> None:
app = Starlette()
with pytest.deprecated_call(
match=("The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0.")
) as record:
app.exception_handler(500)(http_exception)
assert len(record) == 1
with pytest.deprecated_call(
match=("The `middleware` decorator is deprecated, and will be removed in version 1.0.0.")
) as record:
async def middleware(request: Request, call_next: RequestResponseEndpoint) -> None: ... # pragma: no cover
app.middleware("http")(middleware)
assert len(record) == 1
with pytest.deprecated_call(
match=("The `route` decorator is deprecated, and will be removed in version 1.0.0.")
) as record:
app.route("/")(async_homepage)
assert len(record) == 1
with pytest.deprecated_call(
match=("The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0.")
) as record:
app.websocket_route("/ws")(websocket_endpoint)
assert len(record) == 1
with pytest.deprecated_call(
match=("The `on_event` decorator is deprecated, and will be removed in version 1.0.0.")
) as record:
async def startup() -> None: ... # pragma: no cover
app.on_event("startup")(startup)
assert len(record) == 1
def test_middleware_stack_init(test_client_factory: TestClientFactory) -> None:
class NoOpMiddleware:
def __init__(self, app: ASGIApp):
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
await self.app(scope, receive, send)
class SimpleInitializableMiddleware:
counter = 0
def __init__(self, app: ASGIApp):
self.app = app
SimpleInitializableMiddleware.counter += 1
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
await self.app(scope, receive, send)
def get_app() -> ASGIApp:
app = Starlette()
app.add_middleware(SimpleInitializableMiddleware)
app.add_middleware(NoOpMiddleware)
return app
app = get_app()
with test_client_factory(app):
pass
assert SimpleInitializableMiddleware.counter == 1
test_client_factory(app).get("/foo")
assert SimpleInitializableMiddleware.counter == 1
app = get_app()
test_client_factory(app).get("/foo")
assert SimpleInitializableMiddleware.counter == 2
def test_middleware_args(test_client_factory: TestClientFactory) -> None:
calls: list[str] = []
class MiddlewareWithArgs:
def __init__(self, app: ASGIApp, arg: str) -> None:
self.app = app
self.arg = arg
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
calls.append(self.arg)
await self.app(scope, receive, send)
app = Starlette()
app.add_middleware(MiddlewareWithArgs, "foo")
app.add_middleware(MiddlewareWithArgs, "bar")
with test_client_factory(app):
pass
assert calls == ["bar", "foo"]
def test_middleware_factory(test_client_factory: TestClientFactory) -> None:
calls: list[str] = []
def _middleware_factory(app: ASGIApp, arg: str) -> ASGIApp:
async def _app(scope: Scope, receive: Receive, send: Send) -> None:
calls.append(arg)
await app(scope, receive, send)
return _app
def get_middleware_factory() -> Callable[[ASGIApp, str], ASGIApp]:
return _middleware_factory
app = Starlette()
app.add_middleware(_middleware_factory, arg="foo")
app.add_middleware(get_middleware_factory(), "bar")
with test_client_factory(app):
pass
assert calls == ["bar", "foo"]
def test_lifespan_app_subclass() -> None:
# This test exists to make sure that subclasses of Starlette
# (like FastAPI) are compatible with the types hints for Lifespan
class App(Starlette):
pass
@asynccontextmanager
async def lifespan(app: App) -> AsyncIterator[None]: # pragma: no cover
yield
App(lifespan=lifespan)
| CustomWSException |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py | {
"start": 15455,
"end": 24643
} | class ____(BaseVariables):
"""
Variables for templating an ECS job.
"""
task_definition_arn: Optional[str] = Field(
title="Task Definition ARN",
default=None,
description=(
"An identifier for an existing task definition to use. If set, options that"
" require changes to the task definition will be ignored. All contents of "
"the task definition in the job configuration will be ignored."
),
)
env: Dict[str, Optional[str]] = Field(
title="Environment Variables",
default_factory=dict,
description=(
"Environment variables to provide to the task run. These variables are set "
"on the Prefect container at task runtime. These will not be set on the "
"task definition."
),
)
aws_credentials: AwsCredentials = Field(
title="AWS Credentials",
default_factory=AwsCredentials,
description=(
"The AWS credentials to use to connect to ECS. If not provided, credentials"
" will be inferred from the local environment following AWS's boto client's"
" rules."
),
)
cluster: Optional[str] = Field(
default=None,
description=(
"The ECS cluster to run the task in. An ARN or name may be provided. If "
"not provided, the default cluster will be used."
),
)
family: Optional[str] = Field(
default=None,
description=(
"A family for the task definition. If not provided, it will be inferred "
"from the task definition. If the task definition does not have a family, "
"the name will be generated. When flow and deployment metadata is "
"available, the generated name will include their names. Values for this "
"field will be slugified to match AWS character requirements."
),
)
launch_type: Literal["FARGATE", "EC2", "EXTERNAL", "FARGATE_SPOT"] = Field(
default=ECS_DEFAULT_LAUNCH_TYPE,
description=(
"The type of ECS task run infrastructure that should be used. Note that"
" 'FARGATE_SPOT' is not a formal ECS launch type, but we will configure"
" the proper capacity provider strategy if set here."
),
)
capacity_provider_strategy: List[CapacityProvider] = Field(
default_factory=list,
description=(
"The capacity provider strategy to use when running the task. "
"If a capacity provider strategy is specified, the selected launch"
" type will be ignored."
),
)
image: Optional[str] = Field(
default=None,
description=(
"The image to use for the Prefect container in the task. If this value is "
"not null, it will override the value in the task definition. This value "
"defaults to a Prefect base image matching your local versions."
),
)
cpu: Optional[int] = Field(
title="CPU",
default=None,
description=(
"The amount of CPU to provide to the ECS task. Valid amounts are "
"specified in the AWS documentation. If not provided, a default value of "
f"{ECS_DEFAULT_CPU} will be used unless present on the task definition."
),
)
memory: Optional[int] = Field(
default=None,
description=(
"The amount of memory to provide to the ECS task. Valid amounts are "
"specified in the AWS documentation. If not provided, a default value of "
f"{ECS_DEFAULT_MEMORY} will be used unless present on the task definition."
),
)
container_name: Optional[str] = Field(
default=None,
description=(
"The name of the container flow run orchestration will occur in. If not "
f"specified, a default value of {ECS_DEFAULT_CONTAINER_NAME} will be used "
"and if that is not found in the task definition the first container will "
"be used."
),
)
prefect_api_key_secret_arn: Optional[str] = Field(
title="Prefect API Key Secret ARN",
default=None,
description=(
"An ARN of an AWS secret containing a Prefect API key. This key will be used "
"to authenticate ECS tasks with Prefect Cloud. If not provided, the "
"PREFECT_API_KEY environment variable will be used if the worker has one."
),
)
prefect_api_auth_string_secret_arn: Optional[str] = Field(
title="Prefect API Auth String Secret ARN",
default=None,
description=(
"An ARN of an AWS secret containing a Prefect API auth string. This string will be used "
"to authenticate ECS tasks with Prefect Cloud. If not provided, the "
"PREFECT_API_AUTH_STRING environment variable will be used if the worker has one."
),
)
task_role_arn: Optional[str] = Field(
title="Task Role ARN",
default=None,
description=(
"A role to attach to the task run. This controls the permissions of the "
"task while it is running."
),
)
execution_role_arn: Optional[str] = Field(
title="Execution Role ARN",
default=None,
description=(
"An execution role to use for the task. This controls the permissions of "
"the task when it is launching. If this value is not null, it will "
"override the value in the task definition. An execution role must be "
"provided to capture logs from the container."
),
)
vpc_id: Optional[str] = Field(
title="VPC ID",
default=None,
description=(
"The AWS VPC to link the task run to. This is only applicable when using "
"the 'awsvpc' network mode for your task. FARGATE tasks require this "
"network mode, but for EC2 tasks the default network mode is 'bridge'. "
"If using the 'awsvpc' network mode and this field is null, your default "
"VPC will be used. If no default VPC can be found, the task run will fail."
),
)
configure_cloudwatch_logs: Optional[bool] = Field(
default=None,
description=(
"If enabled, the Prefect container will be configured to send its output "
"to the AWS CloudWatch logs service. This functionality requires an "
"execution role with logs:CreateLogStream, logs:CreateLogGroup, and "
"logs:PutLogEvents permissions. The default for this field is `False`."
),
)
cloudwatch_logs_options: Dict[str, str] = Field(
default_factory=dict,
description=(
"When `configure_cloudwatch_logs` is enabled, this setting may be used to"
" pass additional options to the CloudWatch logs configuration or override"
" the default options. See the [AWS"
" documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html#create_awslogs_logdriver_options)" # noqa
" for available options. "
),
)
cloudwatch_logs_prefix: Optional[str] = Field(
default=None,
description=(
"When `configure_cloudwatch_logs` is enabled, this setting may be used to"
" set a prefix for the log group. If not provided, the default prefix will"
" be `prefect-logs_<work_pool_name>_<deployment_id>`. If"
" `awslogs-stream-prefix` is present in `Cloudwatch logs options` this"
" setting will be ignored."
),
)
network_configuration: Dict[str, Any] = Field(
default_factory=dict,
description=(
"When `network_configuration` is supplied it will override ECS Worker's"
"awsvpcConfiguration that defined in the ECS task executing your workload. "
"See the [AWS documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-service-awsvpcconfiguration.html)" # noqa
" for available options."
),
)
auto_deregister_task_definition: bool = Field(
default=False,
description=(
"If enabled, any task definitions that are created by this block will be "
"deregistered. Existing task definitions linked by ARN will never be "
"deregistered. Deregistering a task definition does not remove it from "
"your AWS account, instead it will be marked as INACTIVE."
),
)
match_latest_revision_in_family: bool = Field(
default=False,
description=(
"If enabled, the most recent active revision in the task definition "
"family will be compared against the desired ECS task configuration. "
"If they are equal, the existing task definition will be used instead "
"of registering a new one. If no family is specified the default family "
f'"{ECS_DEFAULT_FAMILY}" will be used.'
),
)
| ECSVariables |
python | kubernetes-client__python | kubernetes/client/models/v1_uncounted_terminated_pods.py | {
"start": 383,
"end": 4413
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'failed': 'list[str]',
'succeeded': 'list[str]'
}
attribute_map = {
'failed': 'failed',
'succeeded': 'succeeded'
}
def __init__(self, failed=None, succeeded=None, local_vars_configuration=None): # noqa: E501
"""V1UncountedTerminatedPods - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._failed = None
self._succeeded = None
self.discriminator = None
if failed is not None:
self.failed = failed
if succeeded is not None:
self.succeeded = succeeded
@property
def failed(self):
"""Gets the failed of this V1UncountedTerminatedPods. # noqa: E501
failed holds UIDs of failed Pods. # noqa: E501
:return: The failed of this V1UncountedTerminatedPods. # noqa: E501
:rtype: list[str]
"""
return self._failed
@failed.setter
def failed(self, failed):
"""Sets the failed of this V1UncountedTerminatedPods.
failed holds UIDs of failed Pods. # noqa: E501
:param failed: The failed of this V1UncountedTerminatedPods. # noqa: E501
:type: list[str]
"""
self._failed = failed
@property
def succeeded(self):
"""Gets the succeeded of this V1UncountedTerminatedPods. # noqa: E501
succeeded holds UIDs of succeeded Pods. # noqa: E501
:return: The succeeded of this V1UncountedTerminatedPods. # noqa: E501
:rtype: list[str]
"""
return self._succeeded
@succeeded.setter
def succeeded(self, succeeded):
"""Sets the succeeded of this V1UncountedTerminatedPods.
succeeded holds UIDs of succeeded Pods. # noqa: E501
:param succeeded: The succeeded of this V1UncountedTerminatedPods. # noqa: E501
:type: list[str]
"""
self._succeeded = succeeded
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1UncountedTerminatedPods):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1UncountedTerminatedPods):
return True
return self.to_dict() != other.to_dict()
| V1UncountedTerminatedPods |
python | google__jax | tests/aot_test.py | {
"start": 1198,
"end": 9290
} | class ____(jtu.JaxTestCase):
@jtu.run_on_devices('tpu', 'gpu')
def test_pickle_jit_lower(self):
def fun(x):
return x * x
with jax.set_mesh(jax.sharding.Mesh(np.array(jax.devices()), ('data',))):
lowered = jax.jit(
fun, in_shardings=P('data'), out_shardings=P(None, 'data')
).lower(core.ShapedArray(shape=(8, 8), dtype=np.float32))
def verify_serialization(lowered):
serialized, in_tree, out_tree = serialize(lowered.compile())
compiled = deserialize_and_load(serialized, in_tree, out_tree)
self.assertEqual(compiled.as_text(), lowered.compile().as_text())
verify_serialization(lowered)
verify_serialization(jax.jit(lambda x: x * x).lower(np.arange(100)))
verify_serialization(
jax.pmap(lambda x: x * x).lower(
np.zeros((len(jax.devices()), 4), dtype=np.float32)))
@jtu.skip_on_devices("tpu") # TODO(phawkins): This test is segfaulting on TPU
def test_topology_jit_serialize(self):
try:
aot_topo = topologies.get_topology_desc(
platform=jax.devices()[0].platform
)
except NotImplementedError:
raise unittest.SkipTest('PJRT Topology not supported')
if jtu.TEST_WITH_PERSISTENT_COMPILATION_CACHE.value:
raise unittest.SkipTest('Compilation caching not yet supported.')
if jtu.is_device_cuda():
raise unittest.SkipTest('Broken on GPU: b/442353988')
@jax.jit
def fn(x):
return x * x
def lower_and_load(mesh):
s = jax.sharding.NamedSharding(mesh, P('x', 'y'))
x_shape = jax.ShapeDtypeStruct(
shape=(16, 16),
dtype=jnp.dtype('float32'),
sharding=s)
lowered = fn.lower(x_shape)
serialized, in_tree, out_tree = serialize(lowered.compile())
compiled = deserialize_and_load(serialized, in_tree, out_tree)
return compiled
ref_topo = topologies.get_attached_topology()
n = max(1, len(ref_topo.devices) // 2)
mesh_shape = (len(ref_topo.devices) // n, n)
ref_mesh = topologies.make_mesh(ref_topo, mesh_shape, ('x', 'y'))
aot_mesh = topologies.make_mesh(aot_topo, mesh_shape, ('x', 'y'))
self.assertEqual(
lower_and_load(ref_mesh).as_text(), lower_and_load(aot_mesh).as_text()
)
def test_get_topology_from_devices(self):
try:
aot_topo = topologies.get_topology_desc(
platform=jax.devices()[0].platform
)
except NotImplementedError:
raise unittest.SkipTest('PJRT Topology not supported')
topo = xc.get_topology_for_devices(aot_topo.devices)
self.assertEqual(
topo.platform_version, aot_topo.devices[0].client.platform_version
)
def test_lower_as_text_with_and_without_debug_info(self):
def my_function(x):
return jnp.sin(x)
lowered = jax.jit(my_function).lower(42.)
stablehlo = lowered.as_text("stablehlo", debug_info=True)
self.assertRegex(stablehlo, r"sine.* loc")
stablehlo = lowered.as_text("stablehlo")
self.assertNotRegex(stablehlo, r"sine.* loc")
hlo = lowered.as_text("hlo", debug_info=True)
self.assertRegex(hlo, r"sine.*metadata=.*source_file=.*")
hlo = lowered.as_text("hlo")
self.assertNotRegex(hlo, r"sine.*metadata=.*source_file=.*")
def test_constants_in_lowering_in_aot(self):
const_size = 100
const = jax.random.uniform(jax.random.key(0), (const_size,),
dtype=np.float32)
def my_function(x):
return jnp.sin(x) + const
lowered = jax.jit(my_function).lower(np.full_like(const, 42., dtype=const.dtype))
stablehlo = lowered.as_text("stablehlo")
if config.use_simplified_jaxpr_constants.value:
self.assertNotRegex(stablehlo, rf"stablehlo.constant dense.*tensor<{const_size}x")
self.assertLen(lowered._lowering.const_args, 1)
self.assertIs(lowered._lowering.const_args[0], const)
else:
self.assertRegex(stablehlo, rf"stablehlo.constant dense.*tensor<{const_size}x")
self.assertLen(lowered._lowering.const_args, 0)
def test_with_constants(self):
const = jnp.arange(16.) + 42. # A distinctive shape and value
@jax.jit
def f(x):
return const[0:8] + x
inp = jnp.arange(8.)
compiled = f.lower(inp).compile()
self.assertLen(compiled.args_info[0], 1) # Not including const_args
self.assertLen(compiled.in_avals[0], 1)
if config.use_simplified_jaxpr_constants.value:
self.assertLen(compiled._params.const_args, 1)
self.assertIs(compiled._params.const_args[0], const)
else:
self.assertLen(compiled._params.const_args, 0)
self.assertArraysEqual(compiled(inp), const[0:8] + inp)
self.assertCacheMisses(lambda: compiled(inp), cpp=0, aot_call=0)
@jtu.parameterized_filterable(
kwargs=[
dict(use_np=use_np, lower=lower, compile=compile, exec=exec)
for use_np in (False, True)
for lower in (False, True)
for compile in (False, True)
for exec in (False, True)
])
def test_with_constants_enable_x64(self, *, use_np, lower, compile, exec):
# Closed-over constant is 64-bit. Each of lowering, compilation, and
# execution can be run in 64-bit or 32-bit mode.
with config.enable_x64(True):
arange = np.arange if use_np else jnp.arange
const = arange(8, dtype=np.int64) + 42
@jax.jit
def f(x):
return lax.convert_element_type(const, np.float32) + x
inp = np.arange(8., dtype=np.float32)
with config.enable_x64(True) if lower else contextlib.nullcontext():
lowered = f.lower(inp)
with config.enable_x64(True) if compile else contextlib.nullcontext():
compiled = lowered.compile()
def run():
with config.enable_x64(True) if exec else contextlib.nullcontext():
return compiled(inp)
self.assertLen(compiled.args_info[0], 1) # Not including const_args
self.assertLen(compiled.in_avals[0], 1)
if config.use_simplified_jaxpr_constants.value:
self.assertLen(compiled._params.const_args, 1)
self.assertLen(compiled._executable.in_avals, 2)
expected_dtype = np.int64
if not config.enable_x64.value and use_np and not lower:
expected_dtype = np.int32
self.assertEqual(compiled._executable.in_avals[0].dtype, expected_dtype)
if expected_dtype is np.int64: # Otherwise, we made a copy of the const
if use_np:
self.assertIs(np.asarray(compiled._params.const_args[0]), const)
else:
self.assertIs(compiled._params.const_args[0], const)
else:
self.assertLen(compiled._params.const_args, 0)
self.assertLen(compiled._executable.in_avals, 1)
# In some cases we expect errors: in 32-bit mode, lowered with 64-bit mode
# and execute in 32-bit mode.
if (config.use_simplified_jaxpr_constants.value and
not config.enable_x64.value and
use_np and lower and not exec):
with self.assertRaisesRegex(
xc.XlaRuntimeError,
"got buffer with incompatible size"):
run()
return
self.assertArraysEqual(run(),
lax.convert_element_type(const, inp.dtype) + inp)
# Trigger cache hit
self.assertCacheMisses(run, cpp=0, aot_call=0)
def test_with_ref_constants(self):
x_ref = core.new_ref(0)
@jax.jit
def f(x):
x_ref[...] += x
f_lowered = f.lower(1)
with self.assertRaisesRegex(ValueError, 'serialize with a closed-over'):
serialized, in_tree, out_tree = serialize(f_lowered.compile())
@jtu.run_on_devices('gpu', 'tpu')
def test_mismatched_backends_raises(self):
@jax.jit
def f(x):
return x * 2
x = jnp.arange(1)
f_lowered = f.lower(x)
serialized, in_tree, out_tree = serialize(f_lowered.compile())
with self.assertRaisesRegex(
ValueError,
'Execution devices belong to a client other than `backend`'):
deserialize_and_load(serialized, in_tree, out_tree, backend='cpu',
execution_devices=jax.devices()[:1])
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| JaxAotTest |
python | getsentry__sentry | src/flagpole/__init__.py | {
"start": 2334,
"end": 6335
} | class ____:
name: str
"The feature name."
owner: str
"The owner of this feature. Either an email address or team name, preferably."
enabled: bool = dataclasses.field(default=True)
"Whether or not the feature is enabled."
segments: list[Segment] = dataclasses.field(default_factory=list)
"The list of segments to evaluate for the feature. An empty list will always evaluate to False."
created_at: str | None = None
"The datetime when this feature was created."
def match(self, context: EvaluationContext) -> bool:
if not self.enabled:
return False
for segment in self.segments:
match = segment.match(context)
if match:
return segment.in_rollout(context)
return False
def validate(self) -> bool:
"""
Validate a feature against the JSON schema.
Will raise if the the current dict form a feature does not match the schema.
"""
dict_data = dataclasses.asdict(self)
spec = load_json_schema()
jsonschema.validate(dict_data, spec)
return True
@classmethod
def from_feature_dictionary(cls, name: str, config_dict: dict[str, Any]) -> Feature:
segment_data = config_dict.get("segments")
if not isinstance(segment_data, list):
raise InvalidFeatureFlagConfiguration("Feature has no segments defined")
try:
segments = [Segment.from_dict(segment) for segment in segment_data]
feature = cls(
name=name,
owner=str(config_dict.get("owner", "")),
enabled=bool(config_dict.get("enabled", True)),
created_at=str(config_dict.get("created_at")),
segments=segments,
)
except Exception as exc:
raise InvalidFeatureFlagConfiguration(
"Provided config_dict is not a valid feature"
) from exc
return feature
@classmethod
def from_feature_config_json(cls, name: str, config_json: str) -> Feature:
try:
config_data_dict = orjson.loads(config_json)
except orjson.JSONDecodeError as decode_error:
raise InvalidFeatureFlagConfiguration("Invalid feature json provided") from decode_error
if not isinstance(config_data_dict, dict):
raise InvalidFeatureFlagConfiguration("Feature JSON is not a valid feature")
if not name:
raise InvalidFeatureFlagConfiguration("Feature name is required")
return cls.from_feature_dictionary(name=name, config_dict=config_data_dict)
@classmethod
def from_bulk_json(cls, json: str) -> list[Feature]:
features: list[Feature] = []
features_json = orjson.loads(json)
for feature, json_dict in features_json.items():
features.append(cls.from_feature_dictionary(name=feature, config_dict=json_dict))
return features
@classmethod
def from_bulk_yaml(cls, yaml_str: str) -> list[Feature]:
features: list[Feature] = []
parsed_yaml = yaml.safe_load(yaml_str)
for feature, yaml_dict in parsed_yaml.items():
features.append(cls.from_feature_dictionary(name=feature, config_dict=yaml_dict))
return features
def to_dict(self) -> dict[str, Any]:
dict_data = dataclasses.asdict(self)
dict_data.pop("name")
return {self.name: dict_data}
def to_yaml_str(self) -> str:
# Add an extra level of indentation by adding a top level dummy config.
# This makes it easier to paste the results into options automator
dump = yaml.dump({"dummy": self.to_dict()})
return "\n".join(dump.split("\n")[1:])
def to_json_str(self) -> str:
return orjson.dumps(self.to_dict()).decode()
__all__ = [
"Feature",
"InvalidFeatureFlagConfiguration",
"ContextBuilder",
"EvaluationContext",
"Segment",
"ConditionBase",
]
| Feature |
python | celery__celery | t/integration/test_tasks.py | {
"start": 22942,
"end": 24615
} | class ____:
args = "CUSTOM ARGS"
kwargs = "CUSTOM KWARGS"
def assert_trace_log(self, caplog, result, expected):
# wait for logs from worker
time.sleep(.01)
records = [(r.name, r.levelno, r.msg, r.data["args"], r.data["kwargs"])
for r in caplog.records
if r.name in {'celery.worker.strategy', 'celery.app.trace'}
if r.data["id"] == result.task_id
]
assert records == [(*e, self.args, self.kwargs) for e in expected]
def call_task_with_reprs(self, task):
return task.set(argsrepr=self.args, kwargsrepr=self.kwargs).delay()
@flaky
def test_task_success(self, caplog):
result = self.call_task_with_reprs(add.s(2, 2))
value = result.get()
assert value == 4
assert result.successful() is True
self.assert_trace_log(caplog, result, [
('celery.worker.strategy', logging.INFO,
celery.app.trace.LOG_RECEIVED,
),
('celery.app.trace', logging.INFO,
celery.app.trace.LOG_SUCCESS,
),
])
@flaky
def test_task_failed(self, caplog):
result = self.call_task_with_reprs(fail.s(2, 2))
with pytest.raises(ExpectedException):
result.get(timeout=5)
assert result.failed() is True
self.assert_trace_log(caplog, result, [
('celery.worker.strategy', logging.INFO,
celery.app.trace.LOG_RECEIVED,
),
('celery.app.trace', logging.ERROR,
celery.app.trace.LOG_FAILURE,
),
])
| test_trace_log_arguments |
python | docker__docker-py | docker/transport/npipeconn.py | {
"start": 669,
"end": 1926
} | class ____(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as ae: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
) from None
# Oh well, we'll create a new connection then
return conn or self._new_conn()
| NpipeHTTPConnectionPool |
python | walkccc__LeetCode | solutions/2734. Lexicographically Smallest String After Substring Operation/2734.py | {
"start": 0,
"end": 334
} | class ____:
def smallestString(self, s: str) -> str:
chars = list(s)
n = len(s)
i = 0
while i < n and chars[i] == 'a':
i += 1
if i == n:
chars[-1] = 'z'
return ''.join(chars)
while i < n and s[i] != 'a':
chars[i] = chr(ord(chars[i]) - 1)
i += 1
return ''.join(chars)
| Solution |
python | apache__airflow | providers/atlassian/jira/src/airflow/providers/atlassian/jira/sensors/jira.py | {
"start": 2392,
"end": 5248
} | class ____(JiraSensor):
"""
Monitors a jira ticket for given change in terms of function.
:param jira_conn_id: reference to a pre-defined Jira Connection
:param ticket_id: id of the ticket to be monitored
:param field: field of the ticket to be monitored
:param expected_value: expected value of the field
:param result_processor: function that return boolean and act as a sensor response
"""
template_fields: Sequence[str] = ("ticket_id",)
def __init__(
self,
*,
jira_conn_id: str = "jira_default",
ticket_id: str | None = None,
field: str | None = None,
expected_value: str | None = None,
field_checker_func: Callable | None = None,
**kwargs,
) -> None:
self.jira_conn_id = jira_conn_id
self.ticket_id = ticket_id
self.field = field
self.expected_value = expected_value
if field_checker_func is None:
field_checker_func = self.issue_field_checker
super().__init__(
jira_conn_id=jira_conn_id, method_name="issue", result_processor=field_checker_func, **kwargs
)
def poke(self, context: Context) -> Any:
self.log.info("Jira Sensor checking for change in ticket: %s", self.ticket_id)
self.method_name = "issue"
self.method_params = {"key": self.ticket_id, "fields": self.field}
return JiraSensor.poke(self, context=context)
def issue_field_checker(self, jira_result: dict) -> bool | None:
"""Check issue using different conditions to prepare to evaluate sensor."""
result = None
if jira_result is not None and self.field is not None and self.expected_value is not None:
field_val = jira_result.get("fields", {}).get(self.field, None)
if field_val is not None:
if isinstance(field_val, list):
result = self.expected_value in field_val
elif isinstance(field_val, str):
result = self.expected_value.lower() == field_val.lower()
elif isinstance(field_val, dict) and field_val.get("name", None):
result = self.expected_value.lower() == field_val.get("name", "").lower()
else:
self.log.warning(
"Not implemented checker for issue field %s which "
"is neither string nor list nor Jira Resource",
self.field,
)
if result is True:
self.log.info(
"Issue field %s has expected value %s, returning success", self.field, self.expected_value
)
else:
self.log.info("Issue field %s don't have expected value %s yet.", self.field, self.expected_value)
return result
| JiraTicketSensor |
python | getsentry__sentry | tests/sentry/integrations/aws_lambda/test_client.py | {
"start": 246,
"end": 2853
} | class ____(TestCase):
@patch.object(boto3, "Session")
@patch.object(boto3, "client")
def test_simple(self, mock_get_client: MagicMock, mock_get_session: MagicMock) -> None:
account_number = "599817902985"
region = "us-west-1"
aws_external_id = "124-343"
mock_client = mock_get_client.return_value
credentials = {
"AccessKeyId": "my_access_key_id",
"SecretAccessKey": "my_secret_access_key",
"SessionToken": "my_session_token",
}
mock_client.assume_role = MagicMock(return_value={"Credentials": credentials})
mock_session = mock_get_session.return_value
mock_session.client = MagicMock(return_value="expected_output")
assert "expected_output" == gen_aws_client(account_number, region, aws_external_id)
mock_get_client.assert_called_once_with(
service_name="sts",
aws_access_key_id="aws-key-id",
aws_secret_access_key="aws-secret-access-key",
region_name="us-east-2",
)
role_arn = "arn:aws:iam::599817902985:role/SentryRole"
mock_client.assume_role.assert_called_once_with(
RoleSessionName="Sentry",
RoleArn=role_arn,
ExternalId=aws_external_id,
Policy=orjson.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": ["lambda:UpdateFunctionConfiguration", "lambda:GetFunction"],
"Resource": "arn:aws:lambda:us-west-1:599817902985:function:*",
},
{
"Effect": "Allow",
"Action": [
"lambda:ListFunctions",
"lambda:ListLayerVersions",
"lambda:GetLayerVersion",
"organizations:DescribeAccount",
],
"Resource": "*",
},
],
}
).decode(),
)
mock_get_session.assert_called_once_with(
aws_access_key_id=credentials["AccessKeyId"],
aws_secret_access_key=credentials["SecretAccessKey"],
aws_session_token=credentials["SessionToken"],
)
mock_session.client.assert_called_once_with(service_name="lambda", region_name="us-west-1")
| AwsLambdaClientTest |
python | milvus-io__pymilvus | pymilvus/client/search_result.py | {
"start": 25757,
"end": 31277
} | class ____(UserDict):
"""Enhanced result in dict that can get data in dict[dict]
Examples:
>>> res = {
>>> "my_id": 1,
>>> "distance": 0.3,
>>> "entity": {
>>> "emb": [1, 2, 3],
>>> "desc": "a description"
>>> }
>>> }
>>> h = Hit(res, pk_name="my_id")
>>> h
{"my_id": 1, "distance": 0.3, "entity": {"emb": [1, 2, 3], "desc": "a description"}}
>>> h["my_id"]
1
>>> h["distance"]
0.3
>>> h["entity"]["emb"]
[1, 2, 3]
>>> h["entity"]["desc"]
"a description"
>>> h.get("emb")
[1, 2, 3]
"""
def __init__(self, *args, pk_name: str = "", **kwargs):
super().__init__(*args, **kwargs)
self._pk_name = pk_name
def __getattr__(self, item: str):
"""Patch for orm, will be deprecated soon"""
# hit.entity return self
if item == "entity":
return self
try:
return self.__getitem__(item)
except KeyError as exc:
raise AttributeError from exc
def to_dict(self) -> Dict[str, Any]:
"""Patch for orm, will be deprecated soon"""
return self
@property
def id(self) -> Union[str, int]:
"""Patch for orm, will be deprecated soon"""
return self.data.get(self._pk_name)
@property
def distance(self) -> float:
"""Patch for orm, will be deprecated soon"""
return self.data.get("distance")
@property
def pk(self) -> Union[str, int]:
"""Alias of id, will be deprecated soon"""
return self.id
@property
def score(self) -> float:
"""Alias of distance, will be deprecated soon"""
return self.distance
@property
def fields(self) -> Dict[str, Any]:
"""Patch for orm, will be deprecated soon"""
return self.get("entity")
def __getitem__(self, key: str):
try:
return self.data[key]
except KeyError:
pass
return self.data["entity"][key]
def get(self, key: Any, default: Any = None):
try:
return self.__getitem__(key)
except KeyError:
pass
return default
def extract_array_row_data(
scalars: List[schema_pb2.ScalarField], element_type: DataType
) -> List[List[Any]]:
row = []
for ith_array in scalars:
if ith_array is None:
row.append(None)
continue
if element_type == DataType.INT64:
row.append(ith_array.long_data.data)
continue
if element_type == DataType.BOOL:
row.append(ith_array.bool_data.data)
continue
if element_type in (DataType.INT8, DataType.INT16, DataType.INT32):
row.append(ith_array.int_data.data)
continue
if element_type == DataType.FLOAT:
row.append(ith_array.float_data.data)
continue
if element_type == DataType.DOUBLE:
row.append(ith_array.double_data.data)
continue
if element_type in (DataType.STRING, DataType.VARCHAR):
row.append(ith_array.string_data.data)
continue
return row
def apply_valid_data(
data: List[Any], valid_data: Union[None, List[bool]], start: int, end: int
) -> List[Any]:
if valid_data:
for i, valid in enumerate(valid_data[start:end]):
if not valid:
data[i] = None
return data
def extract_struct_field_value(field_data: schema_pb2.FieldData, index: int) -> Any:
"""Extract a single value from a struct field at the given index."""
if field_data.type == DataType.BOOL:
if index < len(field_data.scalars.bool_data.data):
return field_data.scalars.bool_data.data[index]
elif field_data.type in (DataType.INT8, DataType.INT16, DataType.INT32):
if index < len(field_data.scalars.int_data.data):
return field_data.scalars.int_data.data[index]
elif field_data.type == DataType.INT64:
if index < len(field_data.scalars.long_data.data):
return field_data.scalars.long_data.data[index]
elif field_data.type == DataType.FLOAT:
if index < len(field_data.scalars.float_data.data):
return np.single(field_data.scalars.float_data.data[index])
elif field_data.type == DataType.DOUBLE:
if index < len(field_data.scalars.double_data.data):
return field_data.scalars.double_data.data[index]
elif field_data.type == DataType.VARCHAR:
if index < len(field_data.scalars.string_data.data):
return field_data.scalars.string_data.data[index]
elif field_data.type == DataType.JSON:
if index < len(field_data.scalars.json_data.data):
return orjson.loads(field_data.scalars.json_data.data[index])
elif field_data.type == DataType.FLOAT_VECTOR:
dim = field_data.vectors.dim
start_idx = index * dim
end_idx = start_idx + dim
if end_idx <= len(field_data.vectors.float_vector.data):
return field_data.vectors.float_vector.data[start_idx:end_idx]
elif field_data.type == DataType.BINARY_VECTOR:
dim = field_data.vectors.dim // 8
start_idx = index * dim
end_idx = start_idx + dim
if end_idx <= len(field_data.vectors.binary_vector):
return field_data.vectors.binary_vector[start_idx:end_idx]
return None
| Hit |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 40087,
"end": 43106
} | class ____:
def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {"use_qformer_text_input": True}
self.parent = parent
self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)
self.is_training = is_training
self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test
def get_config(self):
return Blip2Config(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
)
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.qformer_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
def create_and_check_model(self, config, input_ids, attention_mask):
model = Blip2TextModelWithProjection(config=config)
model.set_attn_implementation("eager")
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=attention_mask, output_attentions=True, output_hidden_states=True)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.vision_model_tester.batch_size, input_ids.shape[1], self.qformer_model_tester.hidden_size),
)
self.parent.assertEqual(
result.text_embeds.shape,
(
self.vision_model_tester.batch_size,
input_ids.shape[1],
config.image_text_hidden_size,
),
)
with torch.no_grad():
result2 = model(
input_ids,
attention_mask=attention_mask,
return_dict=not config.use_return_dict,
output_attentions=True,
output_hidden_states=True,
)
self.parent.assertTrue(torch.allclose(result.text_embeds, result2[0]))
self.parent.assertTrue(torch.allclose(result.last_hidden_state, result2[1]))
self.parent.assertTrue(torch.allclose(result.hidden_states[0], result2[2][0]))
self.parent.assertTrue(torch.allclose(result.hidden_states[1], result2[2][1]))
self.parent.assertTrue(torch.allclose(result.attentions[0], result2[3][0]))
self.parent.assertTrue(torch.allclose(result.attentions[1], result2[3][1]))
@require_torch
| Blip2TextModelWithProjectionTester |
python | pypa__pip | src/pip/_internal/models/pylock.py | {
"start": 1189,
"end": 1438
} | class ____:
url: str | None
# (not supported) path: Optional[str]
# (not supported) size: Optional[int]
# (not supported) upload_time: Optional[datetime]
hashes: dict[str, str]
subdirectory: str | None
@dataclass
| PackageArchive |
python | plotly__plotly.py | plotly/graph_objs/volume/caps/_x.py | {
"start": 233,
"end": 4011
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.caps"
_path_str = "volume.caps.x"
_valid_props = {"fill", "show"}
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the x `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.volume.caps.X`
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the x `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
X
"""
super().__init__("x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.caps.X
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.caps.X`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fill", arg, fill)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| X |
python | pytorch__pytorch | torch/_dynamo/guards.py | {
"start": 130551,
"end": 131446
} | class ____:
def __init__(self, reason: Optional[str] = None) -> None:
self._reason = reason
def __repr__(self) -> str:
return f"_Missing({self._reason})"
def __str__(self) -> str:
return f"_Missing({self._reason})"
# Sometimes _Missing object is used as the callable with functools.partial,
# so we add a dummy __call__ here to bypass TypeError from partial().
def __call__(self, *args: Any, **kwargs: Any) -> Any:
return _Missing()
@functools.cache
def _get_unsupported_types() -> tuple[type, ...]:
# We only do ID_MATCH on C objects which is already banned from guards serialization.
ret: tuple[type, ...] = (
types.CodeType,
torch._C.Stream,
weakref.ReferenceType,
)
try:
ret += (torch._C._distributed_c10d.ProcessGroup,)
except AttributeError:
pass
return ret
| _Missing |
python | google__pytype | pytype/pytd/pytd_utils.py | {
"start": 3826,
"end": 8664
} | class ____:
"""Base class for modules that match types against each other.
Maps pytd node types (<type1>, <type2>) to a method "match_<type1>_<type2>".
So e.g. to write a matcher that compares Functions by name, you would write:
class MyMatcher(TypeMatcher):
def match_Function_Function(self, f1, f2):
return f1.name == f2.name
"""
def default_match(self, t1, t2):
return t1 == t2
def match(self, t1, t2, *args, **kwargs):
name1 = t1.__class__.__name__
name2 = t2.__class__.__name__
f = getattr(self, "match_" + name1 + "_against_" + name2, None)
if f:
return f(t1, t2, *args, **kwargs)
else:
return self.default_match(t1, t2, *args, **kwargs)
def CanonicalOrdering(n):
"""Convert a PYTD node to a canonical (sorted) ordering."""
return n.Visit(pytd_visitors.CanonicalOrderingVisitor())
def GetAllSubClasses(ast):
"""Compute a class->subclasses mapping.
Args:
ast: Parsed PYTD.
Returns:
A dictionary, mapping instances of pytd.Type (types) to lists of
pytd.Class (the derived classes).
"""
hierarchy = ast.Visit(pytd_visitors.ExtractSuperClasses())
hierarchy = {
cls: list(superclasses) for cls, superclasses in hierarchy.items()
}
return utils.invert_dict(hierarchy)
def Print(ast, multiline_args=False):
return ast.Visit(printer.PrintVisitor(multiline_args))
def MakeTypeAnnotation(ast, multiline_args=False):
"""Returns a type annotation and any added typing imports."""
vis = printer.PrintVisitor(multiline_args)
annotation = ast.Visit(vis)
return annotation, vis.typing_imports
def CreateModule(name="<empty>", **kwargs):
module = pytd.TypeDeclUnit(
name, type_params=(), constants=(), classes=(), functions=(), aliases=()
)
return module.Replace(**kwargs)
def WrapTypeDeclUnit(name, items):
"""Given a list (classes, functions, etc.), wrap a pytd around them.
Args:
name: The name attribute of the resulting TypeDeclUnit.
items: A list of items. Can contain pytd.Class, pytd.Function and
pytd.Constant.
Returns:
A pytd.TypeDeclUnit.
Raises:
ValueError: In case of an invalid item in the list.
NameError: For name conflicts.
"""
functions = {}
classes = {}
constants = collections.defaultdict(TypeBuilder)
aliases = {}
typevars = {}
for item in items:
if isinstance(item, pytd.Function):
if item.name in functions:
if item.kind != functions[item.name].kind:
raise ValueError(
f"Can't combine {item.kind} and {functions[item.name].kind}"
)
functions[item.name] = pytd.Function(
item.name,
functions[item.name].signatures + item.signatures,
item.kind,
)
else:
functions[item.name] = item
elif isinstance(item, pytd.Class):
if item.name in classes:
raise NameError(f"Duplicate top level class: {item.name!r}")
classes[item.name] = item
elif isinstance(item, pytd.Constant):
constants[item.name].add_type(item.type)
elif isinstance(item, pytd.Alias):
if item.name in aliases:
raise NameError(f"Duplicate top level alias or import: {item.name!r}")
aliases[item.name] = item
elif isinstance(item, pytd.TypeParameter):
if item.name in typevars:
raise NameError(f"Duplicate top level type parameter: {item.name!r}")
typevars[item.name] = item
else:
raise ValueError(f"Invalid top level pytd item: {type(item)!r}")
categories = {
"function": functions,
"class": classes,
"constant": constants,
"alias": aliases,
"typevar": typevars,
}
for c1, c2 in itertools.combinations(categories, 2):
_check_intersection(categories[c1], categories[c2], c1, c2)
return pytd.TypeDeclUnit(
name=name,
constants=tuple(
pytd.Constant(name, t.build())
for name, t in sorted(constants.items())
),
type_params=tuple(typevars.values()),
classes=tuple(classes.values()),
functions=tuple(functions.values()),
aliases=tuple(aliases.values()),
)
def _check_intersection(items1, items2, name1, name2):
"""Check for duplicate identifiers."""
items = set(items1) & set(items2)
if items:
if len(items) == 1:
raise NameError(
"Top level identifier %r is both %s and %s"
% (list(items)[0], name1, name2)
)
max_items = 5 # an arbitrary value
if len(items) > max_items:
raise NameError(
"Top level identifiers %s, ... are both %s and %s"
% ", ".join(map(repr, sorted(items)[:max_items])),
name1,
name2,
)
raise NameError(
"Top level identifiers %s are both %s and %s"
% (", ".join(map(repr, sorted(items))), name1, name2)
)
| TypeMatcher |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 9262,
"end": 9333
} | class ____(HTTPClientError):
status_code = 421
| HTTPMisdirectedRequest |
python | ansible__ansible | lib/ansible/modules/service.py | {
"start": 17195,
"end": 40096
} | class ____(Service):
"""
This is the Linux Service manipulation class - it is currently supporting
a mixture of binaries and init scripts for controlling services started at
boot, as well as for controlling the current state.
"""
platform = 'Linux'
distribution = None
def get_service_tools(self):
paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
binaries = ['service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv']
initpaths = ['/etc/init.d']
location = dict()
for binary in binaries:
location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
for initdir in initpaths:
initscript = "%s/%s" % (initdir, self.name)
if os.path.isfile(initscript):
self.svc_initscript = initscript
def check_systemd():
# tools must be installed
if location.get('systemctl', False):
return is_systemd_managed(self.module)
return False
# Locate a tool to enable/disable a service
if check_systemd():
# service is managed by systemd
self.__systemd_unit = self.name
self.svc_cmd = location['systemctl']
self.enable_cmd = location['systemctl']
elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
# service is managed by upstart
self.enable_cmd = location['initctl']
# set the upstart version based on the output of 'initctl version'
self.upstart_version = LooseVersion('0.0.0')
try:
version_re = re.compile(r'\(upstart (.*)\)')
rc, stdout, stderr = self.module.run_command('%s version' % location['initctl'])
if rc == 0:
res = version_re.search(stdout)
if res:
self.upstart_version = LooseVersion(res.groups()[0])
except Exception:
pass # we'll use the default of 0.0.0
self.svc_cmd = location['initctl']
elif location.get('rc-service', False):
# service is managed by OpenRC
self.svc_cmd = location['rc-service']
self.enable_cmd = location['rc-update']
return # already have service start/stop tool too!
elif self.svc_initscript:
# service is managed by with SysV init scripts
if location.get('update-rc.d', False):
# and uses update-rc.d
self.enable_cmd = location['update-rc.d']
elif location.get('insserv', None):
# and uses insserv
self.enable_cmd = location['insserv']
elif location.get('chkconfig', False):
# and uses chkconfig
self.enable_cmd = location['chkconfig']
if self.enable_cmd is None:
fail_if_missing(self.module, False, self.name, msg='host')
# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):
self.svc_cmd = location['service']
# couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript:
self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')
if location.get('initctl', False):
self.svc_initctl = location['initctl']
def get_systemd_service_enabled(self):
def sysv_exists(name):
script = '/etc/init.d/' + name
return os.access(script, os.X_OK)
def sysv_is_enabled(name):
return bool(glob.glob('/etc/rc?.d/S??' + name))
service_name = self.__systemd_unit
(rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
if rc == 0:
return True
elif out.startswith('disabled'):
return False
elif sysv_exists(service_name):
return sysv_is_enabled(service_name)
else:
return False
def get_systemd_status_dict(self):
# Check status first as show will not fail if service does not exist
(rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
if rc != 0:
self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
elif 'LoadState=not-found' in out:
self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
key = None
value_buffer = []
status_dict = {}
for line in out.splitlines():
if '=' in line:
if not key:
key, value = line.split('=', 1)
# systemd fields that are shell commands can be multi-line
# We take a value that begins with a "{" as the start of
# a shell command and a line that ends with "}" as the end of
# the command
if value.lstrip().startswith('{'):
if value.rstrip().endswith('}'):
status_dict[key] = value
key = None
else:
value_buffer.append(value)
else:
status_dict[key] = value
key = None
else:
if line.rstrip().endswith('}'):
status_dict[key] = '\n'.join(value_buffer)
key = None
else:
value_buffer.append(value)
else:
value_buffer.append(value)
return status_dict
def get_systemd_service_status(self):
d = self.get_systemd_status_dict()
if d.get('ActiveState') == 'active':
# run-once services (for which a single successful exit indicates
# that they are running as designed) should not be restarted here.
# Thus, we are not checking d['SubState'].
self.running = True
self.crashed = False
elif d.get('ActiveState') == 'failed':
self.running = False
self.crashed = True
elif d.get('ActiveState') is None:
self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
else:
self.running = False
self.crashed = False
return self.running
def get_service_status(self):
if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
return self.get_systemd_service_status()
self.action = "status"
rc, status_stdout, status_stderr = self.service_control()
# if we have decided the service is managed by upstart, we check for some additional output...
if self.svc_initctl and self.running is None:
# check the job status by upstart response
initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s %s" % (self.svc_initctl, self.name, self.arguments))
if "stop/waiting" in initctl_status_stdout:
self.running = False
elif "start/running" in initctl_status_stdout:
self.running = True
if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
self.running = "started" in openrc_status_stdout
self.crashed = "crashed" in openrc_status_stderr
# Prefer a non-zero return code. For reference, see:
# http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
if self.running is None and rc in [1, 2, 3, 4, 69]:
self.running = False
# if the job status is still not known check it by status output keywords
# Only check keywords if there's only one line of output (some init
# scripts will output verbosely in case of error and those can emit
# keywords that are picked up as false positives
if self.running is None and status_stdout.count('\n') <= 1:
# first transform the status output that could irritate keyword matching
cleanout = status_stdout.lower().replace(self.name.lower(), '')
if "stop" in cleanout:
self.running = False
elif "run" in cleanout:
self.running = not ("not " in cleanout)
elif "start" in cleanout and "not " not in cleanout:
self.running = True
elif 'could not access pid file' in cleanout:
self.running = False
elif 'is dead and pid file exists' in cleanout:
self.running = False
elif 'dead but subsys locked' in cleanout:
self.running = False
elif 'dead but pid file exists' in cleanout:
self.running = False
# if the job status is still not known and we got a zero for the
# return code, assume here that the service is running
if self.running is None and rc == 0:
self.running = True
# if the job status is still not known check it by special conditions
if self.running is None:
if self.name == 'iptables' and "ACCEPT" in status_stdout:
# iptables status command output is lame
# TODO: lookup if we can use a return code for this instead?
self.running = True
return self.running
def service_enable(self):
if self.enable_cmd is None:
self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
self.changed = True
action = None
#
# Upstart's initctl
#
if self.enable_cmd.endswith("initctl"):
def write_to_override_file(file_name, file_contents, ):
with open(file_name, 'w') as override_file:
override_file.write(file_contents)
initpath = '/etc/init'
if self.upstart_version >= LooseVersion('0.6.7'):
manreg = re.compile(r'^manual\s*$', re.M | re.I)
config_line = 'manual\n'
else:
manreg = re.compile(r'^start on manual\s*$', re.M | re.I)
config_line = 'start on manual\n'
conf_file_name = "%s/%s.conf" % (initpath, self.name)
override_file_name = "%s/%s.override" % (initpath, self.name)
# Check to see if files contain the manual line in .conf and fail if True
with open(conf_file_name) as conf_file_fh:
conf_file_content = conf_file_fh.read()
if manreg.search(conf_file_content):
self.module.fail_json(msg="manual stanza not supported in a .conf file")
self.changed = False
if os.path.exists(override_file_name):
with open(override_file_name) as override_fh:
override_file_contents = override_fh.read()
# Remove manual stanza if present and service enabled
if self.enable and manreg.search(override_file_contents):
self.changed = True
override_state = manreg.sub('', override_file_contents)
# Add manual stanza if not present and service disabled
elif not (self.enable) and not (manreg.search(override_file_contents)):
self.changed = True
override_state = '\n'.join((override_file_contents, config_line))
# service already in desired state
else:
pass
# Add file with manual stanza if service disabled
elif not (self.enable):
self.changed = True
override_state = config_line
else:
# service already in desired state
pass
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
# The initctl method of enabling and disabling services is much
# different than for the other service methods. So actually
# committing the change is done in this conditional and then we
# skip the boilerplate at the bottom of the method
if self.changed:
try:
write_to_override_file(override_file_name, override_state)
except Exception:
self.module.fail_json(msg='Could not modify override file')
return
#
# SysV's chkconfig
#
if self.enable_cmd.endswith("chkconfig"):
if self.enable:
action = 'on'
else:
action = 'off'
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if 'chkconfig --add %s' % self.name in err:
self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if self.name not in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
# TODO: look back on why this is here
# state = out.split()[-1]
# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:
self.changed = False
return
#
# Systemd's systemctl
#
if self.enable_cmd.endswith("systemctl"):
if self.enable:
action = 'enable'
else:
action = 'disable'
# Check if we're already in the correct state
service_enabled = self.get_systemd_service_enabled()
# self.changed should already be true
if self.enable == service_enabled:
self.changed = False
return
#
# OpenRC's rc-update
#
if self.enable_cmd.endswith("rc-update"):
if self.enable:
action = 'add'
else:
action = 'delete'
(rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
for line in out.splitlines():
service_name, runlevels = line.split('|')
service_name = service_name.strip()
if service_name != self.name:
continue
runlevels = re.split(r'\s+', runlevels)
# service already enabled for the runlevel
if self.enable and self.runlevel in runlevels:
self.changed = False
# service already disabled for the runlevel
elif not self.enable and self.runlevel not in runlevels:
self.changed = False
break
else:
# service already disabled altogether
if not self.enable:
self.changed = False
if not self.changed:
return
#
# update-rc.d style
#
if self.enable_cmd.endswith("update-rc.d"):
enabled = False
slinks = glob.glob('/etc/rc?.d/S??' + self.name)
if slinks:
enabled = True
if self.enable != enabled:
self.changed = True
if self.enable:
action = 'enable'
klinks = glob.glob('/etc/rc?.d/K??' + self.name)
if not klinks:
if not self.module.check_mode:
(rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
action = 'disable'
if not self.module.check_mode:
(rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
self.changed = False
return
#
# insserv (Debian <=7, SLES, others)
#
if self.enable_cmd.endswith("insserv"):
if self.enable:
(rc, out, err) = self.execute_command("%s -n -v %s" % (self.enable_cmd, self.name))
else:
(rc, out, err) = self.execute_command("%s -n -r -v %s" % (self.enable_cmd, self.name))
self.changed = False
for line in err.splitlines():
if self.enable and line.find('enable service') != -1:
self.changed = True
break
if not self.enable and line.find('remove service') != -1:
self.changed = True
break
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
if not self.changed:
return
if self.enable:
(rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
else:
(rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
#
# If we've gotten to the end, the service needs to be updated
#
self.changed = True
# we change argument order depending on real binary used:
# rc-update and systemctl need the argument order reversed
if self.enable_cmd.endswith("rc-update"):
args = (self.enable_cmd, action, self.name + " " + self.runlevel)
elif self.enable_cmd.endswith("systemctl"):
args = (self.enable_cmd, action, self.__systemd_unit)
else:
args = (self.enable_cmd, self.name, action)
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
(rc, out, err) = self.execute_command("%s %s %s" % args)
if rc != 0:
if err:
self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
else:
self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
return (rc, out, err)
def service_control(self):
# Decide what command to run
svc_cmd = ''
arguments = self.arguments
if self.svc_cmd:
if not self.svc_cmd.endswith("systemctl"):
if self.svc_cmd.endswith("initctl"):
# initctl commands take the form <cmd> <action> <name>
svc_cmd = self.svc_cmd
arguments = "%s %s" % (self.name, arguments)
else:
# SysV and OpenRC take the form <cmd> <name> <action>
svc_cmd = "%s %s" % (self.svc_cmd, self.name)
else:
# systemd commands take the form <cmd> <action> <name>
svc_cmd = self.svc_cmd
arguments = "%s %s" % (self.__systemd_unit, arguments)
elif self.svc_cmd is None and self.svc_initscript:
# upstart
svc_cmd = "%s" % self.svc_initscript
# In OpenRC, if a service crashed, we need to reset its status to
# stopped with the zap command, before we can start it back.
if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
self.execute_command("%s zap" % svc_cmd, daemonize=True)
if self.action != "restart":
if svc_cmd != '':
# upstart or systemd or OpenRC
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# SysV
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
# All services in OpenRC support restart.
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
else:
# In other systems, not all services support restart. Do it the hard way.
if svc_cmd != '':
# upstart or systemd
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
else:
# SysV
rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
if self.sleep:
time.sleep(self.sleep)
if svc_cmd != '':
# upstart or systemd
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
else:
# SysV
rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
# merge return information
if rc1 != 0 and rc2 == 0:
rc_state = rc2
stdout = stdout2
stderr = stderr2
else:
rc_state = rc1 + rc2
stdout = stdout1 + stdout2
stderr = stderr1 + stderr2
return (rc_state, stdout, stderr)
| LinuxService |
python | doocs__leetcode | solution/0600-0699/0686.Repeated String Match/Solution.py | {
"start": 0,
"end": 301
} | class ____:
def repeatedStringMatch(self, a: str, b: str) -> int:
m, n = len(a), len(b)
ans = ceil(n / m)
t = [a] * ans
for _ in range(3):
if b in ''.join(t):
return ans
ans += 1
t.append(a)
return -1
| Solution |
python | walkccc__LeetCode | solutions/2085. Count Common Words With One Occurrence/2085.py | {
"start": 0,
"end": 275
} | class ____:
def countWords(self, words1: list[str], words2: list[str]) -> int:
count = collections.Counter(words1)
for word in words2:
if word in count and count[word] < 2:
count[word] -= 1
return sum(value == 0 for value in count.values())
| Solution |
python | ethereum__web3.py | tests/core/method-class/test_result_formatters.py | {
"start": 341,
"end": 496
} | class ____(BaseProvider):
def make_request(method, params):
raise NotImplementedError
result_for_test = {"method_for_test": "ok"}
| DummyProvider |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/arpack.py | {
"start": 16921,
"end": 26216
} | class ____(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None,
sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0,
rng=None):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError(f"mode={mode} not implemented")
if which not in _SEUPD_WHICH:
raise ValueError(f"which must be one of {' '.join(_SEUPD_WHICH)}")
if k >= n:
raise ValueError(f"k must be less than ndim(A), k={k}")
self.rng = np.random.default_rng(rng)
_ArpackParams.__init__(self, n, k, tp, self.rng, mode, sigma, ncv, v0,
maxiter, which, tol)
self.arpack_dict['bmat'] = 0 if self.bmat == 'I' else 1
if self.ncv > n or self.ncv <= k:
raise ValueError(f"ncv must be k<ncv<=n, ncv={self.ncv}")
self.workd = np.zeros(3 * n, dtype=self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), dtype=self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpacklib.__dict__[ltr + 'saupd_wrap']
self._arpack_extract = _arpacklib.__dict__[ltr + 'seupd_wrap']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, dtype=np.int32)
def iterate(self):
self._arpack_solver(self.arpack_dict, self.resid, self.v, self.ipntr,
self.workd, self.workl)
xslice = slice(self.ipntr[0], self.ipntr[0] + self.n)
yslice = slice(self.ipntr[1], self.ipntr[1] + self.n)
if self.arpack_dict['ido'] == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2], self.ipntr[2] + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2], self.ipntr[2] + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.arpack_dict['ido'] == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.arpack_dict['ido'] == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
elif self.arpack_dict['ido'] == 4:
# Generate random vector into resid
self.resid[:] = self.rng.uniform(low=-1.0, high=1.0,
size=[self.n]).astype(self.tp)
elif self.arpack_dict['ido'] == 5:
self.workd[yslice] = self.OP(self.workd[xslice])
else:
self.converged = True
if self.arpack_dict['info'] == 0:
pass
elif self.arpack_dict['info'] == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.arpack_dict['info'],
infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
self.arpack_dict['info'] = 0 # Clear, if any, previous error from naupd
howmny = HOWMNY_DICT["A"] # return all eigenvectors
sselect = np.zeros(self.ncv, dtype=np.int32)
d = np.zeros(self.k, dtype=self.tp)
z = np.zeros((self.n, self.ncv), dtype=self.tp, order='F')
self._arpack_extract(
self.arpack_dict, rvec, howmny, sselect, d, z, self.sigma,
self.resid, self.v, self.ipntr, self.workd, #[0:2 * self.n],
self.workl
)
ierr = self.arpack_dict['info']
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.arpack_dict['nconv']
d = d[:k_ok]
if return_eigenvectors:
z = z[:, :k_ok].copy(order='C')
return d, z
else:
return d
| _SymmetricArpackParams |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 3057,
"end": 3326
} | class ____(BaseSafeMigrationTest):
app = "good_flow_add_column_with_notnull_db_default_app"
migrate_from = "0001_initial"
migrate_to = "0002_add_field_notnull_db_default"
def test(self) -> None:
self.run_migration()
| AddColWithNotNullDbDefaultTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 10428,
"end": 10499
} | class ____(B23):
def m1(self, a):
return add_feature_c(a)
| C23 |
python | pandas-dev__pandas | pandas/tests/plotting/test_boxplot_method.py | {
"start": 1037,
"end": 15372
} | class ____:
def test_stacked_boxplot_set_axis(self):
# GH2980
n = 30
df = DataFrame(
{
"Clinical": np.random.default_rng(2).choice([0, 1, 2, 3], n),
"Confirmed": np.random.default_rng(2).choice([0, 1, 2, 3], n),
"Discarded": np.random.default_rng(2).choice([0, 1, 2, 3], n),
},
index=np.arange(0, n),
)
ax = df.plot(kind="bar", stacked=True)
assert [int(x.get_text()) for x in ax.get_xticklabels()] == df.index.to_list()
ax.set_xticks(np.arange(0, n, 10))
plt.draw() # Update changes
assert [int(x.get_text()) for x in ax.get_xticklabels()] == list(
np.arange(0, n, 10)
)
@pytest.mark.slow
@pytest.mark.parametrize(
"kwargs, warn",
[
[{"return_type": "dict"}, None],
[{"column": ["one", "two"]}, None],
[{"column": ["one", "two"], "by": "indic"}, UserWarning],
[{"column": ["one"], "by": ["indic", "indic2"]}, None],
[{"by": "indic"}, UserWarning],
[{"by": ["indic", "indic2"]}, UserWarning],
[{"notch": 1}, None],
[{"by": "indic", "notch": 1}, UserWarning],
],
)
def test_boxplot_legacy1(self, kwargs, warn):
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
df["indic"] = ["foo", "bar"] * 3
df["indic2"] = ["foo", "bar", "foo"] * 2
# _check_plot_works can add an ax so catch warning. see GH #13188
with tm.assert_produces_warning(warn, check_stacklevel=False):
_check_plot_works(df.boxplot, **kwargs)
def test_boxplot_legacy1_series(self):
ser = Series(np.random.default_rng(2).standard_normal(6))
_check_plot_works(plotting._core.boxplot, data=ser, return_type="dict")
def test_boxplot_legacy2(self):
df = DataFrame(
np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
_check_plot_works(df.boxplot, by="X")
def test_boxplot_legacy2_with_ax(self):
df = DataFrame(
np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
_, ax = mpl.pyplot.subplots()
axes = df.boxplot("Col1", by="X", ax=ax)
ax_axes = ax.axes
assert ax_axes is axes
def test_boxplot_legacy2_with_ax_return_type(self):
df = DataFrame(
np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
fig, ax = mpl.pyplot.subplots()
axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
ax_axes = ax.axes
assert ax_axes is axes["A"]
def test_boxplot_legacy2_with_multi_col(self):
df = DataFrame(
np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
# Multiple columns with an ax argument should use same figure
fig, ax = mpl.pyplot.subplots()
msg = "the figure containing the passed axes is being cleared"
with tm.assert_produces_warning(UserWarning, match=msg):
axes = df.boxplot(
column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
)
assert axes["Col1"].get_figure() is fig
def test_boxplot_legacy2_by_none(self):
df = DataFrame(
np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
)
df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
df["Y"] = Series(["A"] * 10)
# When by is None, check that all relevant lines are present in the
# dict
_, ax = mpl.pyplot.subplots()
d = df.boxplot(ax=ax, return_type="dict")
lines = list(itertools.chain.from_iterable(d.values()))
assert len(ax.get_lines()) == len(lines)
def test_boxplot_return_type_none(self, hist_df):
# GH 12216; return_type=None & by=None -> axes
result = hist_df.boxplot()
assert isinstance(result, mpl.pyplot.Axes)
def test_boxplot_return_type_legacy(self):
# API change in https://github.com/pandas-dev/pandas/pull/7096
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
msg = "return_type must be {'axes', 'dict', 'both'}"
with pytest.raises(ValueError, match=msg):
df.boxplot(return_type="NOT_A_TYPE")
result = df.boxplot()
_check_box_return_type(result, "axes")
@pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
def test_boxplot_return_type_legacy_return_type(self, return_type):
# API change in https://github.com/pandas-dev/pandas/pull/7096
df = DataFrame(
np.random.default_rng(2).standard_normal((6, 4)),
index=list(string.ascii_letters[:6]),
columns=["one", "two", "three", "four"],
)
with tm.assert_produces_warning(False):
result = df.boxplot(return_type=return_type)
_check_box_return_type(result, return_type)
def test_boxplot_axis_limits(self, hist_df):
df = hist_df.copy()
df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0])
# One full row
height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
assert weight_ax._sharey == height_ax
def test_boxplot_axis_limits_two_rows(self, hist_df):
df = hist_df.copy()
df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0])
# Two rows, one partial
p = df.boxplot(["height", "weight", "age"], by="category")
height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
dummy_ax = p[1, 1]
_check_ax_limits(df["height"], height_ax)
_check_ax_limits(df["weight"], weight_ax)
_check_ax_limits(df["age"], age_ax)
assert weight_ax._sharey == height_ax
assert age_ax._sharey == height_ax
assert dummy_ax._sharey is None
def test_boxplot_empty_column(self):
df = DataFrame(np.random.default_rng(2).standard_normal((20, 4)))
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type="axes")
def test_figsize(self):
df = DataFrame(
np.random.default_rng(2).random((10, 5)), columns=["A", "B", "C", "D", "E"]
)
result = df.boxplot(return_type="axes", figsize=(12, 8))
assert result.figure.bbox_inches.width == 12
assert result.figure.bbox_inches.height == 8
def test_fontsize(self):
df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
_check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16)
def test_boxplot_numeric_data(self):
# GH 22799
df = DataFrame(
{
"a": date_range("2012-01-01", periods=10),
"b": np.random.default_rng(2).standard_normal(10),
"c": np.random.default_rng(2).standard_normal(10) + 2,
"d": date_range("2012-01-01", periods=10).astype(str),
"e": date_range("2012-01-01", periods=10, tz="UTC"),
"f": timedelta_range("1 days", periods=10),
}
)
ax = df.plot(kind="box")
assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
@pytest.mark.parametrize(
"colors_kwd, expected",
[
(
{"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
{"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
),
({"boxes": "r"}, {"boxes": "r"}),
("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}),
],
)
def test_color_kwd(self, colors_kwd, expected):
# GH: 26214
df = DataFrame(np.random.default_rng(2).random((10, 2)))
result = df.boxplot(color=colors_kwd, return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"scheme,expected",
[
(
"dark_background",
{
"boxes": "#8dd3c7",
"whiskers": "#8dd3c7",
"medians": "#bfbbd9",
"caps": "#8dd3c7",
},
),
(
"default",
{
"boxes": "#1f77b4",
"whiskers": "#1f77b4",
"medians": "#2ca02c",
"caps": "#1f77b4",
},
),
],
)
def test_colors_in_theme(self, scheme, expected):
# GH: 40769
df = DataFrame(np.random.default_rng(2).random((10, 2)))
plt.style.use(scheme)
result = df.plot.box(return_type="dict")
for k, v in expected.items():
assert result[k][0].get_color() == v
@pytest.mark.parametrize(
"dict_colors, msg",
[({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")],
)
def test_color_kwd_errors(self, dict_colors, msg):
# GH: 26214
df = DataFrame(np.random.default_rng(2).random((10, 2)))
with pytest.raises(ValueError, match=msg):
df.boxplot(color=dict_colors, return_type="dict")
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.default_rng(2).random(10) for k in "ABC"})
kwd = {props: {"color": "C1"}}
result = df.boxplot(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
@pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
def test_plot_xlabel_ylabel(self, vert):
df = DataFrame(
{
"a": np.random.default_rng(2).standard_normal(10),
"b": np.random.default_rng(2).standard_normal(10),
"group": np.random.default_rng(2).choice(["group1", "group2"], 10),
}
)
xlabel, ylabel = "x", "y"
ax = df.plot(kind="box", xlabel=xlabel, ylabel=ylabel, **vert)
assert ax.get_xlabel() == xlabel
assert ax.get_ylabel() == ylabel
@pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
def test_plot_box(self, vert):
# GH 54941
rng = np.random.default_rng(2)
df1 = DataFrame(rng.integers(0, 100, size=(10, 4)), columns=list("ABCD"))
df2 = DataFrame(rng.integers(0, 100, size=(10, 4)), columns=list("ABCD"))
xlabel, ylabel = "x", "y"
_, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True)
df1.plot.box(ax=axs[0], xlabel=xlabel, ylabel=ylabel, **vert)
df2.plot.box(ax=axs[1], xlabel=xlabel, ylabel=ylabel, **vert)
for ax in axs:
assert ax.get_xlabel() == xlabel
assert ax.get_ylabel() == ylabel
@pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
def test_boxplot_xlabel_ylabel(self, vert):
df = DataFrame(
{
"a": np.random.default_rng(2).standard_normal(10),
"b": np.random.default_rng(2).standard_normal(10),
"group": np.random.default_rng(2).choice(["group1", "group2"], 10),
}
)
xlabel, ylabel = "x", "y"
ax = df.boxplot(xlabel=xlabel, ylabel=ylabel, **vert)
assert ax.get_xlabel() == xlabel
assert ax.get_ylabel() == ylabel
@pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
def test_boxplot_group_xlabel_ylabel(self, vert):
df = DataFrame(
{
"a": np.random.default_rng(2).standard_normal(10),
"b": np.random.default_rng(2).standard_normal(10),
"group": np.random.default_rng(2).choice(["group1", "group2"], 10),
}
)
xlabel, ylabel = "x", "y"
ax = df.boxplot(by="group", xlabel=xlabel, ylabel=ylabel, **vert)
for subplot in ax:
assert subplot.get_xlabel() == xlabel
assert subplot.get_ylabel() == ylabel
@pytest.mark.filterwarnings("ignore:set_ticklabels:UserWarning")
def test_boxplot_group_no_xlabel_ylabel(self, vert, request):
if Version(mpl.__version__) >= Version("3.10") and vert == {
"orientation": "horizontal"
}:
request.applymarker(
pytest.mark.xfail(reason=f"{vert} fails starting with matplotlib 3.10")
)
df = DataFrame(
{
"a": np.random.default_rng(2).standard_normal(10),
"b": np.random.default_rng(2).standard_normal(10),
"group": np.random.default_rng(2).choice(["group1", "group2"], 10),
}
)
ax = df.boxplot(by="group", **vert)
for subplot in ax:
target_label = (
subplot.get_xlabel()
if vert == {"vert": True} or vert == {"orientation": "vertical"}
else subplot.get_ylabel()
)
assert target_label == pprint_thing(["group"])
| TestDataFramePlots |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 25331,
"end": 27201
} | class ____(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
def test_literal(self, literal_round_trip):
literal_round_trip(Integer, [5], [5])
def _huge_ints():
return testing.combinations(
2147483649, # 32 bits
2147483648, # 32 bits
2147483647, # 31 bits
2147483646, # 31 bits
-2147483649, # 32 bits
-2147483648, # 32 interestingly, asyncpg accepts this one as int32
-2147483647, # 31
-2147483646, # 31
0,
1376537018368127,
-1376537018368127,
argnames="intvalue",
)
@_huge_ints()
def test_huge_int_auto_accommodation(self, connection, intvalue):
"""test #7909"""
eq_(
connection.scalar(
select(intvalue).where(literal(intvalue) == intvalue)
),
intvalue,
)
@_huge_ints()
def test_huge_int(self, integer_round_trip, intvalue):
integer_round_trip(BigInteger, intvalue)
@testing.fixture
def integer_round_trip(self, metadata, connection):
def run(datatype, data):
int_table = Table(
"integer_table",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("integer_data", datatype),
)
metadata.create_all(config.db)
connection.execute(
int_table.insert(), {"id": 1, "integer_data": data}
)
row = connection.execute(select(int_table.c.integer_data)).first()
eq_(row, (data,))
assert isinstance(row[0], int)
return run
| IntegerTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/SpinBox.py | {
"start": 208,
"end": 26678
} | class ____(QtWidgets.QAbstractSpinBox):
"""
**Bases:** QtWidgets.QAbstractSpinBox
Extension of QSpinBox widget for selection of a numerical value.
Adds many extra features:
* SI prefix notation (eg, automatically display "300 mV" instead of "0.003 V")
* Float values with linear and decimal stepping (1-9, 10-90, 100-900, etc.)
* Option for unbounded values
* Delayed signals (allows multiple rapid changes with only one change signal)
* Customizable text formatting
============================= ==============================================
**Signals:**
valueChanged(value) Same as QSpinBox; emitted every time the value
has changed.
sigValueChanged(self) Emitted when value has changed, but also combines
multiple rapid changes into one signal (eg,
when rolling the mouse wheel).
sigValueChanging(self, value) Emitted immediately for all value changes.
============================= ==============================================
"""
## There's a PyQt bug that leaks a reference to the
## QLineEdit returned from QAbstractSpinBox.lineEdit()
## This makes it possible to crash the entire program
## by making accesses to the LineEdit after the spinBox has been deleted.
## I have no idea how to get around this..
valueChanged = QtCore.Signal(object) # (value) for compatibility with QSpinBox
sigValueChanged = QtCore.Signal(object) # (self)
sigValueChanging = QtCore.Signal(object, object) # (self, value) sent immediately; no delay.
def __init__(self, parent=None, value=0.0, **kwargs):
"""
============== ========================================================================
**Arguments:**
parent Sets the parent widget for this SpinBox (optional). Default is None.
value (float/int) initial value. Default is 0.0.
============== ========================================================================
All keyword arguments are passed to :func:`setOpts`.
"""
QtWidgets.QAbstractSpinBox.__init__(self, parent)
self.lastValEmitted = None
self.lastText = ''
self.textValid = True ## If false, we draw a red border
self.setMinimumWidth(0)
self._lastFontHeight = None
self.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Preferred)
self.errorBox = ErrorBox(self.lineEdit())
self.opts = {
'bounds': [None, None],
'wrapping': False,
## normal arithmetic step
'step': decimal.Decimal('0.01'), ## if 'dec' is false, the spinBox steps by 'step' every time
## if 'dec' is True, the step size is relative to the value
## 'step' needs to be an integral divisor of ten, ie 'step'*n=10 for some integer value of n (but only if dec is True)
'dec': False, ## if true, does decimal stepping. ie from 1-10 it steps by 'step', from 10 to 100 it steps by 10*'step', etc.
## if true, minStep must be set in order to cross zero.
'int': False, ## Set True to force value to be integer. If True, 'step' is rounded to the nearest integer or defaults to 1.
'finite': True,
'prefix': '', ## string to be prepended to spin box value
'suffix': '',
'suffixPower': 1, ## power to which the suffix units are raised (for correct SI prefix scaling)
'siPrefix': False, ## Set to True to display numbers with SI prefix (ie, 100pA instead of 1e-10A)
'scaleAtZero': None,
'delay': 0.3, ## delay sending wheel update signals for 300ms
'delayUntilEditFinished': True, ## do not send signals until text editing has finished
'decimals': 6,
'format': "{prefix}{prefixGap}{scaledValueString}{suffixGap}{siPrefix}{suffix}",
'regex': fn.float_regex_for_locale(self.locale()), #Default regex based on system locale
'evalFunc': decimal.Decimal,
'compactHeight': True, # manually remove extra margin outside of text
}
if kwargs.get('int', False):
self.opts['step'] = 1
self.decOpts = ['step', 'minStep']
self.val = decimal.Decimal(str(value)) ## Value is precise decimal. Ordinary math not allowed.
self.updateText()
self.skipValidate = False
self.setCorrectionMode(self.CorrectionMode.CorrectToPreviousValue)
self.setKeyboardTracking(False)
self.proxy = SignalProxy(
self.sigValueChanging,
delay=self.opts['delay'],
slot=self.delayedChange,
threadSafe=False,
)
self.setOpts(**kwargs)
self._updateHeight()
self.editingFinished.connect(self.editingFinishedEvent)
def setOpts(self, **opts):
"""Set options affecting the behavior of the SpinBox.
============== ========================================================================
**Arguments:**
bounds (min,max) Minimum and maximum values allowed in the SpinBox.
Either may be None to leave the value unbounded. By default, values are
unbounded.
suffix (str) suffix (units) to display after the numerical value. By default,
suffix is an empty str.
siPrefix (bool) If True, then an SI prefix is automatically prepended
to the units and the value is scaled accordingly. For example,
if value=0.003 and suffix='V', then the SpinBox will display
"300 mV" (but a call to SpinBox.value will still return 0.003). In case
the value represents a dimensionless quantity that might span many
orders of magnitude, such as a Reynolds number, an SI
prefix is allowed with no suffix. Default is False.
suffixPower (int or float) The power to which the suffix units are raised. This is used
for correct scaling of the SI prefix when the units are nonlinear. Supports
positive, negative and non-integral powers. Default is 1.
prefix (str) String to be prepended to the spin box value. Default is an empty string.
scaleAtZero (float) If siPrefix is also True, this option then sets the default SI prefix
that a value of 0 will have applied (and thus the default scale of the first
number the user types in after the SpinBox has been zeroed out).
step (float) The size of a single step. This is used when clicking the up/
down arrows, when rolling the mouse wheel, or when pressing
keyboard arrows while the widget has keyboard focus. Note that
the interpretation of this value is different when specifying
the 'dec' argument. If 'int' is True, 'step' is rounded to the nearest integer.
Default is 0.01 if 'int' is False and 1 otherwise.
dec (bool) If True, then the step value will be adjusted to match
the current size of the variable (for example, a value of 15
might step in increments of 1 whereas a value of 1500 would
step in increments of 100). In this case, the 'step' argument
is interpreted *relative* to the current value. The most common
'step' values when dec=True are 0.1, 0.2, 0.5, and 1.0. Default is
False.
minStep (float) When dec=True, this specifies the minimum allowable step size.
int (bool) If True, the value is forced to integer type.
If True, 'step' is rounded to the nearest integer or defaults to 1.
Default is False
finite (bool) When False and int=False, infinite values (nan, inf, -inf) are
permitted. Default is True.
wrapping (bool) If True and both bounds are not None, spin box has circular behavior.
decimals (int) Number of decimal values to display. Default is 6.
format (str) Formatting string used to generate the text shown. Formatting is
done with ``str.format()`` and makes use of several arguments:
* *value* - the unscaled value of the spin box
* *prefix* - the prefix string
* *prefixGap* - a single space if a prefix is present, or an empty
string otherwise
* *suffix* - the suffix string
* *scaledValue* - the scaled value to use when an SI prefix is present
* *scaledValueString* - scaled value as a string, formatted according to locale
* *siPrefix* - the SI prefix string (if any), or an empty string if
this feature has been disabled
* *suffixGap* - a single space if a suffix is present, or an empty
string otherwise.
regex (str or RegexObject) Regular expression used to parse the spinbox text.
May contain the following group names:
* *number* - matches the numerical portion of the string (mandatory)
* *siPrefix* - matches the SI prefix string
* *suffix* - matches the suffix string
Default depends on locale, and is either
``pyqtgraph.functions.FLOAT_REGEX_PERIOD`` or
``pyqtgraph.functions.FLOAT_REGEX_COMMA``.
evalFunc (callable) Fucntion that converts a numerical string to a number,
preferrably a Decimal instance. This function handles only the numerical
of the text; it does not have access to the suffix or SI prefix.
compactHeight (bool) if True, then set the maximum height of the spinbox based on the
height of its font. This allows more compact packing on platforms with
excessive widget decoration. Default is True.
locale (QtCore.QLocale) Sets the locale used for formatting and parsing numbers.
Affects the decimal point behavior. Default is system locale.
============== ========================================================================
"""
#print opts
for k,v in opts.items():
if k == 'bounds':
self.setMinimum(v[0], update=False)
self.setMaximum(v[1], update=False)
elif k == 'min':
self.setMinimum(v, update=False)
elif k == 'max':
self.setMaximum(v, update=False)
elif k in ['step', 'minStep']:
self.opts[k] = decimal.Decimal(str(v))
elif k == 'value':
pass ## don't set value until bounds have been set
elif k == 'format':
self.opts[k] = str(v)
elif k == 'regex' and isinstance(v, str):
self.opts[k] = re.compile(v)
elif k == 'locale':
self.setLocale(v)
elif k in self.opts:
self.opts[k] = v
else:
raise TypeError("Invalid keyword argument '%s'." % k)
if 'value' in opts:
self.setValue(opts['value'])
## If bounds have changed, update value to match
if 'bounds' in opts and 'value' not in opts:
self.setValue()
## sanity checks:
if self.opts['int']:
self.opts['step'] = round(self.opts.get('step', 1))
if 'minStep' in opts:
step = opts['minStep']
if int(step) != step:
raise Exception('Integer SpinBox must have integer minStep size.')
else:
ms = int(self.opts.get('minStep', 1))
if ms < 1:
ms = 1
self.opts['minStep'] = ms
if self.opts['dec']:
if self.opts.get('minStep') is None:
self.opts['minStep'] = self.opts['step']
if 'delay' in opts:
self.proxy.setDelay(opts['delay'])
self.updateText()
def setLocale(self, locale):
"""Set the locale used for formatting and parsing numbers.
Arguments:
locale (QtCore.QLocale): The locale to use.
"""
super().setLocale(locale)
# Update regex to match new locale decimal point
self.opts['regex'] = fn.float_regex_for_locale(locale)
self.updateText()
def setMaximum(self, m, update=True):
"""Set the maximum allowed value (or None for no limit)"""
if m is not None:
m = decimal.Decimal(str(m))
self.opts['bounds'][1] = m
if update:
self.setValue()
def setMinimum(self, m, update=True):
"""Set the minimum allowed value (or None for no limit)"""
if m is not None:
m = decimal.Decimal(str(m))
self.opts['bounds'][0] = m
if update:
self.setValue()
def wrapping(self):
"""Return whether or not the spin box is circular."""
return self.opts['wrapping']
def setWrapping(self, s):
"""Set whether spin box is circular.
Both bounds must be set for this to have an effect."""
self.opts['wrapping'] = s
def setPrefix(self, p):
"""Set a string prefix.
"""
self.setOpts(prefix=p)
def setRange(self, r0, r1):
"""Set the upper and lower limits for values in the spinbox.
"""
self.setOpts(bounds = [r0,r1])
def setProperty(self, prop, val):
## for QSpinBox compatibility
if prop == 'value':
#if type(val) is QtCore.QVariant:
#val = val.toDouble()[0]
self.setValue(val)
else:
print("Warning: SpinBox.setProperty('%s', ..) not supported." % prop)
def setSuffix(self, suf):
"""Set the string suffix appended to the spinbox text.
"""
self.setOpts(suffix=suf)
def setSingleStep(self, step):
"""Set the step size used when responding to the mouse wheel, arrow
buttons, or arrow keys.
"""
self.setOpts(step=step)
def setDecimals(self, decimals):
"""Set the number of decimals to be displayed when formatting numeric
values.
"""
self.setOpts(decimals=decimals)
def selectNumber(self):
"""
Select the numerical portion of the text to allow quick editing by the user.
"""
le = self.lineEdit()
text = le.text()
m = self.opts['regex'].match(text)
if m is None:
return
s,e = m.start('number'), m.end('number')
le.setSelection(s, e-s)
def focusInEvent(self, ev):
super(SpinBox, self).focusInEvent(ev)
self.selectNumber()
def value(self):
"""
Return the value of this SpinBox.
"""
if self.opts['int']:
return int(self.val)
else:
return float(self.val)
    def setValue(self, value=None, update=True, delaySignal=False):
        """Set the value of this SpinBox and return the value actually stored.

        Out-of-bounds values are clipped to the nearest boundary, or wrapped
        into range when wrapping is enabled (which requires both bounds to be
        set). In int mode the value is coerced to int before storage.

        If *value* is None, the current value is re-applied; this is used to
        re-clamp after bounds or other options change.

        Parameters:
            value: new value, or None to re-apply the current value.
            update (bool): if True, refresh the displayed text when the value
                changed or was clipped.
            delaySignal (bool): if True, suppress the immediate valueChanged
                emission (a delayed emission will follow via sigValueChanging).
        """
        if value is None:
            value = self.value()

        # Tracks whether the requested value was already inside the bounds;
        # a clipped/wrapped value still forces a text refresh below.
        bounded = True
        if not isnan(value):
            bounds = self.opts['bounds']
            if None not in bounds and self.opts['wrapping'] is True:
                # Wrapping mode: map the value into [lower, upper) modularly.
                bounded = False
                if isinf(value):
                    # Infinity cannot be wrapped; keep the previous value.
                    value = self.val
                else:
                    # Cast Decimals to float to avoid surprising semantics of
                    # the remainder operator on Decimal operands.
                    value = float(value)
                    l, u = float(bounds[0]), float(bounds[1])
                    value = (value - l) % (u - l) + l
            else:
                # Clamping mode: clip to whichever bounds are defined.
                if bounds[0] is not None and value < bounds[0]:
                    bounded = False
                    value = bounds[0]
                if bounds[1] is not None and value > bounds[1]:
                    bounded = False
                    value = bounds[1]

        if self.opts['int']:
            value = int(value)

        # Values are stored internally as Decimal for exact stepping math.
        if not isinstance(value, decimal.Decimal):
            value = decimal.Decimal(str(value))

        prev, self.val = self.val, value
        changed = not fn.eq(value, prev)  # fn.eq treats nan == nan as True

        if update and (changed or not bounded):
            self.updateText()

        if changed:
            self.sigValueChanging.emit(self, float(self.val))  ## change will be emitted in 300ms if there are no subsequent changes.
            if not delaySignal:
                self.emitChanged()

        return value
def emitChanged(self):
self.lastValEmitted = self.val
self.valueChanged.emit(float(self.val))
self.sigValueChanged.emit(self)
    @QtCore.Slot()
    def delayedChange(self):
        # Debounced change handler: emit the value-changed signals only if the
        # value moved since the last emission. fn.eq is used so NaN compares
        # equal to NaN (plain != would re-emit forever for NaN values).
        try:
            if not fn.eq(self.val, self.lastValEmitted):  # use fn.eq to handle nan
                self.emitChanged()
        except RuntimeError:
            pass  ## This can happen if we try to handle a delayed signal after someone else has already deleted the underlying C++ object.
def widgetGroupInterface(self):
return (self.valueChanged, SpinBox.value, SpinBox.setValue)
def sizeHint(self):
return QtCore.QSize(120, 0)
def stepEnabled(self):
return self.StepEnabledFlag.StepUpEnabled | self.StepEnabledFlag.StepDownEnabled
def stepBy(self, n):
## note all steps (arrow buttons, wheel, up/down keys..) emit delayed signals only.
self.setValue(self._stepByValue(n), delaySignal=True)
    def _stepByValue(self, steps):
        """Return the value reached after stepping *steps* increments from
        the current value (negative steps go down). Does not modify state.

        In 'dec' mode the step size scales with the magnitude of the value
        (roughly one step per decade); otherwise a fixed step is used.
        """
        # Infinite / NaN values cannot be stepped; return unchanged.
        if isinf(self.val) or isnan(self.val):
            return self.val
        steps = int(steps)
        # Direction of stepping as a Decimal (+1 or -1).
        sign = [decimal.Decimal(-1), decimal.Decimal(1)][steps >= 0]
        val = self.val
        for i in range(int(abs(steps))):
            if self.opts['dec']:
                if val == 0:
                    # At zero the decade is undefined; use the minimum step.
                    step = self.opts['minStep']
                    exp = None
                else:
                    # Sign of the current value (+1 or -1).
                    vs = [decimal.Decimal(-1), decimal.Decimal(1)][val >= 0]
                    ## fudge factor. at some places, the step size depends on the step sign.
                    fudge = decimal.Decimal('1.01') ** (sign * vs)
                    # Decade (floor of log10) of the fudged magnitude; the
                    # step is opts['step'] scaled to that decade.
                    exp = abs(val * fudge).log10().quantize(1, decimal.ROUND_FLOOR)
                    step = self.opts['step'] * decimal.Decimal(10) ** exp
                if 'minStep' in self.opts:
                    step = max(step, self.opts['minStep'])
                val += sign * step
            else:
                val += sign * self.opts['step']
            # Snap values smaller than minStep to exactly zero.
            if 'minStep' in self.opts and abs(val) < self.opts['minStep']:
                val = decimal.Decimal(0)
        return val
def valueInRange(self, value):
if not isnan(value):
bounds = self.opts['bounds']
if bounds[0] is not None and value < bounds[0]:
return False
if bounds[1] is not None and value > bounds[1]:
return False
if self.opts.get('int', False):
if int(value) != value:
return False
return True
def updateText(self):
# temporarily disable validation
self.skipValidate = True
txt = self.formatText()
# actually set the text
self.lineEdit().setText(txt)
self.lastText = txt
# re-enable the validation
self.skipValidate = False
    def formatText(self):
        """Build the display string for the current value using the 'format'
        option template (see setOpts docs for the available fields)."""
        # Formatting options for this render.
        decimals = self.opts['decimals']
        suffix = self.opts['suffix']
        prefix = self.opts['prefix']

        # Current value (int or float depending on int mode).
        val = self.value()

        # Default substitution fields: no SI scaling, empty SI prefix.
        parts = {'value': val, 'suffix': suffix, 'decimals': decimals, 'siPrefix': '', 'prefix':prefix}
        if self.opts['siPrefix'] is True:
            # SI prefix requested: choose a scale for the value. At exactly
            # zero the scale is ambiguous, so use scaleAtZero if given, else
            # the magnitude of a single step from zero.
            if self.val == 0:
                if self.opts['scaleAtZero'] is not None:
                    (s, p) = fn.siScale(self.opts['scaleAtZero'], power=self.opts['suffixPower'])
                else:
                    (s, p) = fn.siScale(self._stepByValue(1), power=self.opts['suffixPower'])
            else:
                (s, p) = fn.siScale(val, power=self.opts['suffixPower'])
            val *= s  # apply the SI scale factor
            parts['siPrefix'] = p
        if self.opts['int']:
            # scaledValue kept as a number for backward compatibility.
            parts['scaledValue'] = int(round(val))
            parts['scaledValueString'] = str(parts['scaledValue'])
        else:
            parts['scaledValue'] = val
            valuestring = self.locale().toString(val, 'g', decimals)
            # Strip any locale group separator (e.g. thousands separator).
            parts['scaledValueString'] = valuestring.replace(self.locale().groupSeparator(), '')
        # Gap fields produce a single space only when there is something to
        # separate, so the template never renders stray spaces.
        parts['prefixGap'] = '' if parts['prefix'] == '' else ' '
        parts['suffixGap'] = '' if (parts['suffix'] == '' and parts['siPrefix'] == '') else ' '
        return self.opts['format'].format(**parts)
def validate(self, strn, pos):
if self.skipValidate:
ret = QtGui.QValidator.State.Acceptable
else:
try:
val = self.interpret()
if val is False:
ret = QtGui.QValidator.State.Intermediate
else:
if self.valueInRange(val):
if not self.opts['delayUntilEditFinished']:
self.setValue(val, update=False)
ret = QtGui.QValidator.State.Acceptable
else:
ret = QtGui.QValidator.State.Intermediate
except:
import sys
sys.excepthook(*sys.exc_info())
ret = QtGui.QValidator.State.Intermediate
## draw / clear border
if ret == QtGui.QValidator.State.Intermediate:
self.textValid = False
elif ret == QtGui.QValidator.State.Acceptable:
self.textValid = True
## note: if text is invalid, we don't change the textValid flag
## since the text will be forced to its previous state anyway
self.update()
self.errorBox.setVisible(not self.textValid)
return (ret, strn, pos)
def fixup(self, strn):
# fixup is called when the spinbox loses focus with an invalid or intermediate string
self.updateText()
return self.lineEdit().text()
    def interpret(self):
        """Parse the line-edit text and return its numeric value, or False
        if the text is not (yet) a valid value."""
        strn = self.lineEdit().text()

        # Strip the configured prefix. The except branch is a compatibility
        # shim for Python < 3.9, where str.removeprefix does not exist.
        try:
            strn = strn.removeprefix(self.opts['prefix'])
        except AttributeError:
            strn = strn[len(self.opts['prefix']):]

        # Tokenize into numerical value, SI prefix, and suffix.
        try:
            val, siprefix, suffix = fn.siParse(strn, self.opts['regex'], suffix=self.opts['suffix'])
        except Exception:
            return False

        # Reject text whose suffix does not match the configured suffix.
        if suffix != self.opts['suffix']:
            return False

        # Convert the numeric token to a number; normalize a comma decimal
        # separator to '.' first so evalFunc always sees a '.' form.
        val = self.opts['evalFunc'](val.replace(',', '.'))

        # Infinite / NaN values are invalid in int mode or when finite=True.
        if (self.opts['int'] or self.opts['finite']) and (isinf(val) or isnan(val)):
            return False

        if self.opts['int']:
            val = int(fn.siApply(val, siprefix))
        else:
            try:
                val = fn.siApply(val, siprefix)
            except Exception:
                import sys
                sys.excepthook(*sys.exc_info())
                return False

        return val
@QtCore.Slot()
def editingFinishedEvent(self):
"""Edit has finished; set value."""
if self.lineEdit().text() == self.lastText:
return
try:
val = self.interpret()
except Exception:
return
if val is False:
return
if val == self.val:
return
self.setValue(val, delaySignal=False) ## allow text update so that values are reformatted pretty-like
def _updateHeight(self):
# SpinBox has very large margins on some platforms; this is a hack to remove those
# margins and allow more compact packing of controls.
if not self.opts['compactHeight']:
self.setMaximumHeight(1000000)
return
h = QtGui.QFontMetrics(self.font()).height()
if self._lastFontHeight != h:
self._lastFontHeight = h
self.setMaximumHeight(h)
def paintEvent(self, ev):
self._updateHeight()
super().paintEvent(ev)
| SpinBox |
python | getsentry__sentry | src/sentry/seer/endpoints/organization_seer_explorer_runs.py | {
"start": 715,
"end": 862
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read"],
}
@region_silo_endpoint
| OrganizationSeerExplorerRunsPermission |
python | scikit-learn__scikit-learn | sklearn/compose/tests/test_column_transformer.py | {
"start": 1417,
"end": 1553
} | class ____(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
return 2 * X
| DoubleTrans |
python | chardet__chardet | chardet/sbcharsetprober.py | {
"start": 1578,
"end": 6334
} | class ____(CharSetProber):
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
def __init__(
self,
model: SingleByteCharSetModel,
is_reversed: bool = False,
name_prober: Optional[CharSetProber] = None,
) -> None:
super().__init__()
self._model = model
# TRUE if we need to reverse every pair in the model lookup
self._reversed = is_reversed
# Optional auxiliary prober for name decision
self._name_prober = name_prober
self._last_order = 255
self._seq_counters: List[int] = []
self._total_seqs = 0
self._total_char = 0
self._control_char = 0
self._freq_char = 0
self.reset()
def reset(self) -> None:
super().reset()
# char order of last character
self._last_order = 255
self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
self._total_seqs = 0
self._total_char = 0
self._control_char = 0
# characters that fall in our sampling range
self._freq_char = 0
@property
def charset_name(self) -> Optional[str]:
if self._name_prober:
return self._name_prober.charset_name
return self._model.charset_name
@property
def language(self) -> Optional[str]:
if self._name_prober:
return self._name_prober.language
return self._model.language
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
# TODO: Make filter_international_words keep things in self.alphabet
if not self._model.keep_ascii_letters:
byte_str = self.filter_international_words(byte_str)
else:
byte_str = self.remove_xml_tags(byte_str)
if not byte_str:
return self.state
char_to_order_map = self._model.char_to_order_map
language_model = self._model.language_model
for char in byte_str:
order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
# XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
# CharacterCategory.SYMBOL is actually 253, so we use CONTROL
# to make it closer to the original intent. The only difference
# is whether or not we count digits and control characters for
# _total_char purposes.
if order < CharacterCategory.CONTROL:
self._total_char += 1
if order < self.SAMPLE_SIZE:
self._freq_char += 1
if self._last_order < self.SAMPLE_SIZE:
self._total_seqs += 1
if not self._reversed:
lm_cat = language_model[self._last_order][order]
else:
lm_cat = language_model[order][self._last_order]
self._seq_counters[lm_cat] += 1
self._last_order = order
charset_name = self._model.charset_name
if self.state == ProbingState.DETECTING:
if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
confidence = self.get_confidence()
if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
self.logger.debug(
"%s confidence = %s, we have a winner", charset_name, confidence
)
self._state = ProbingState.FOUND_IT
elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
self.logger.debug(
"%s confidence = %s, below negative shortcut threshold %s",
charset_name,
confidence,
self.NEGATIVE_SHORTCUT_THRESHOLD,
)
self._state = ProbingState.NOT_ME
return self.state
def get_confidence(self) -> float:
r = 0.01
if self._total_seqs > 0:
r = (
(
self._seq_counters[SequenceLikelihood.POSITIVE]
+ 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
)
/ self._total_seqs
/ self._model.typical_positive_ratio
)
# The more control characters (proportionnaly to the size
# of the text), the less confident we become in the current
# charset.
r = r * (self._total_char - self._control_char) / self._total_char
r = r * self._freq_char / self._total_char
if r >= 1.0:
r = 0.99
return r
| SingleByteCharSetProber |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_cond_format18.py | {
"start": 345,
"end": 4749
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 1)
worksheet.write("A2", 2)
worksheet.write("A3", 3)
worksheet.write("A4", 4)
worksheet.write("A5", 5)
worksheet.write("A6", 6)
worksheet.write("A7", 7)
worksheet.write("A8", 8)
worksheet.write("A9", 9)
worksheet.write("A10", 10)
worksheet.write("A11", 11)
worksheet.write("A12", 12)
worksheet.conditional_format(
"A1:A12",
{
"type": "3_color_scale",
"multi_range": "$A$3:$A$4 A1 A6:$A$8 $A10 A$12",
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A12"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>1</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>2</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>3</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>4</v>
</c>
</row>
<row r="5" spans="1:1">
<c r="A5">
<v>5</v>
</c>
</row>
<row r="6" spans="1:1">
<c r="A6">
<v>6</v>
</c>
</row>
<row r="7" spans="1:1">
<c r="A7">
<v>7</v>
</c>
</row>
<row r="8" spans="1:1">
<c r="A8">
<v>8</v>
</c>
</row>
<row r="9" spans="1:1">
<c r="A9">
<v>9</v>
</c>
</row>
<row r="10" spans="1:1">
<c r="A10">
<v>10</v>
</c>
</row>
<row r="11" spans="1:1">
<c r="A11">
<v>11</v>
</c>
</row>
<row r="12" spans="1:1">
<c r="A12">
<v>12</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A3:A4 A1 A6:A8 A10 A12">
<cfRule type="colorScale" priority="1">
<colorScale>
<cfvo type="min" val="0"/>
<cfvo type="percentile" val="50"/>
<cfvo type="max" val="0"/>
<color rgb="FFF8696B"/>
<color rgb="FFFFEB84"/>
<color rgb="FF63BE7B"/>
</colorScale>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | streamlit__streamlit | lib/tests/streamlit/runtime/app_session_test.py | {
"start": 92434,
"end": 93173
} | class ____(unittest.TestCase):
def test_returns_true_if_current_page_changed(self):
session = _create_test_session()
session._client_state.page_script_hash = "hash2"
assert session._should_rerun_on_file_change("page2.py")
def test_returns_true_if_changed_file_is_not_page(self):
session = _create_test_session()
session._client_state.page_script_hash = "hash1"
assert session._should_rerun_on_file_change("some_other_file.py")
def test_returns_false_if_different_page_changed(self):
session = _create_test_session()
session._client_state.page_script_hash = "hash2"
assert not session._should_rerun_on_file_change("page1.py")
| ShouldRerunOnFileChangeTest |
python | scipy__scipy | scipy/io/matlab/_mio4.py | {
"start": 1744,
"end": 2146
} | class ____:
# Mat4 variables never logical or global
is_logical = False
is_global = False
def __init__(self,
name,
dtype,
mclass,
dims,
is_complex):
self.name = name
self.dtype = dtype
self.mclass = mclass
self.dims = dims
self.is_complex = is_complex
| VarHeader4 |
python | py-pdf__pypdf | pypdf/filters.py | {
"start": 16732,
"end": 17864
} | class ____:
"""Decodes string ASCII85-encoded data into a byte format."""
@staticmethod
def decode(
data: Union[str, bytes],
decode_parms: Optional[DictionaryObject] = None,
**kwargs: Any,
) -> bytes:
"""
Decode an Ascii85 encoded data stream.
Args:
data: ``bytes`` or ``str`` text to decode.
decode_parms: this filter does not use parameters.
Returns:
decoded data.
"""
if isinstance(data, str):
data = data.encode()
data = data.strip(WHITESPACES_AS_BYTES)
if len(data) > 2 and data.endswith(b">"):
data = data[:-1].rstrip(WHITESPACES_AS_BYTES) + data[-1:]
try:
return a85decode(data, adobe=True, ignorechars=WHITESPACES_AS_BYTES)
except ValueError as error:
if error.args[0] == "Ascii85 encoded byte sequences must end with b'~>'":
logger_warning("Ignoring missing Ascii85 end marker.", __name__)
return a85decode(data, adobe=False, ignorechars=WHITESPACES_AS_BYTES)
raise
| ASCII85Decode |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 74356,
"end": 78349
} | class ____:
async def test_task_run_tags_added_at_submission(
self, prefect_client, events_pipeline
):
@flow
def my_flow():
with tags("a", "b"):
future = my_task.submit()
return future
@task
def my_task():
pass
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b"}
async def test_task_run_tags_added_at_run(self, prefect_client, events_pipeline):
@flow
def my_flow():
with tags("a", "b"):
state = my_task(return_state=True)
return state
@task
def my_task():
pass
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b"}
async def test_task_run_tags_added_at_call(self, prefect_client, events_pipeline):
@flow
def my_flow():
with tags("a", "b"):
result = my_task()
return get_state_for_result(result)
@task
def my_task():
return "foo"
task_state, _ = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b"}
async def test_task_run_tags_include_tags_on_task_object(
self, prefect_client, events_pipeline
):
@flow
def my_flow():
with tags("c", "d"):
state = my_task(return_state=True)
return state
@task(tags={"a", "b"})
def my_task():
pass
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b", "c", "d"}
async def test_task_run_tags_include_flow_run_tags(
self, prefect_client, events_pipeline
):
@flow
def my_flow():
with tags("c", "d"):
state = my_task(return_state=True)
return state
@task
def my_task():
pass
with tags("a", "b"):
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b", "c", "d"}
async def test_task_run_tags_not_added_outside_context(
self, prefect_client, events_pipeline
):
@flow
def my_flow():
with tags("a", "b"):
my_task()
state = my_task(return_state=True)
return state
@task
def my_task():
pass
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert not task_run.tags
async def test_task_run_tags_respects_nesting(
self, prefect_client, events_pipeline
):
@flow
def my_flow():
with tags("a", "b"):
with tags("c", "d"):
state = my_task(return_state=True)
return state
@task
def my_task():
pass
task_state = my_flow()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert set(task_run.tags) == {"a", "b", "c", "d"}
| TestTaskRunTags |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scalarstring.py | {
"start": 1571,
"end": 1889
} | class ____(ScalarString):
__slots__ = 'comment' # the comment after the | on the first line
style = '|'
def __new__(cls, value, anchor=None):
# type: (Text, Any) -> Any
return ScalarString.__new__(cls, value, anchor=anchor)
PreservedScalarString = LiteralScalarString
| LiteralScalarString |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/cymysql.py | {
"start": 1328,
"end": 1827
} | class ____(BIT):
def result_processor(
self, dialect: Dialect, coltype: object
) -> Optional[_ResultProcessorType[Any]]:
"""Convert MySQL's 64 bit, variable length binary string to a long."""
def process(value: Optional[Iterable[int]]) -> Optional[int]:
if value is not None:
v = 0
for i in iter(value):
v = v << 8 | i
return v
return value
return process
| _cymysqlBIT |
python | tensorflow__tensorflow | tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py | {
"start": 2835,
"end": 9632
} | class ____(tf.test.TestCase):
def _apply(self, defun=False, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
if defun:
model.call = tf.function(model.call)
with tf.device(device), context.execution_mode(execution_mode):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
context.async_wait()
self.assertEqual((2, 1000), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply(self):
self._apply(defun=False)
def test_apply_async(self):
self._apply(defun=False, execution_mode=context.ASYNC)
def test_apply_with_defun(self):
self._apply(defun=True)
def test_apply_with_defun_async(self):
self._apply(defun=True, execution_mode=context.ASYNC)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_top(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_with_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
self.assertEqual((2, 2048), output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_no_average_pooling(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, average_pooling=False, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 7, 7) if data_format == 'channels_first' else
(2, 7, 7, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_block3_strides(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_apply_retrieve_intermediates(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(
data_format, block3_strides=True, include_top=False)
intermediates_dict = {}
with tf.device(device):
images, _ = resnet50_test_util.random_batch(2, data_format)
output = model(images, training=False,
intermediates_dict=intermediates_dict)
output_shape = ((2, 2048, 1, 1) if data_format == 'channels_first' else
(2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
if data_format == 'channels_first':
block_shapes = {
'block0': (2, 64, 112, 112),
'block0mp': (2, 64, 55, 55),
'block1': (2, 256, 55, 55),
'block2': (2, 512, 28, 28),
'block3': (2, 1024, 7, 7),
'block4': (2, 2048, 1, 1),
}
else:
block_shapes = {
'block0': (2, 112, 112, 64),
'block0mp': (2, 55, 55, 64),
'block1': (2, 55, 55, 256),
'block2': (2, 28, 28, 512),
'block3': (2, 7, 7, 1024),
'block4': (2, 1, 1, 2048),
}
for (block_name, block) in intermediates_dict.items():
self.assertEqual(block_shapes[block_name], block.shape)
def _test_train(self, execution_mode=None):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.compat.v2.summary.experimental.set_step(
tf.compat.v1.train.get_or_create_global_step())
logdir = tempfile.mkdtemp()
with tf.compat.v2.summary.create_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.compat.v2.summary.record_if(True):
with tf.device(device), context.execution_mode(execution_mode):
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
images, labels = resnet50_test_util.random_batch(2, data_format)
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
self.assertEqual(320, len(model.variables))
context.async_wait()
events = events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_train(self):
self._test_train()
@test_util.disable_tfrt('TFE_ContextGetExecutorForThread missing b/156188669')
def test_train_async(self):
self._test_train(execution_mode=context.ASYNC)
@test_util.disable_tfrt('Flaky test. b/157103729')
def test_no_garbage(self):
device, data_format = resnet50_test_util.device_and_data_format()
model = resnet50.ResNet50(data_format)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
with tf.device(device):
images, labels = resnet50_test_util.random_batch(2, data_format)
gc.disable()
# Warm up. Note that this first run does create significant amounts of
# garbage to be collected. The hope is that this is a build-only effect,
# and a subsequent training loop will create nothing which needs to be
# collected.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
for _ in range(2):
# Run twice to ensure that garbage that is created on the first
# iteration is no longer accessible.
apply_gradients(model, optimizer,
compute_gradients(model, images, labels))
gc.collect()
# There should be no garbage requiring collection.
self.assertEqual(0, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
| ResNet50Test |
python | python-poetry__poetry | tests/test_factory.py | {
"start": 1204,
"end": 17220
} | class ____(Plugin):
def activate(self, poetry: Poetry, io: IO) -> None:
io.write_line("Setting readmes")
poetry.package.readmes = (Path("README.md"),)
def test_create_poetry(fixture_dir: FixtureDirGetter) -> None:
poetry = Factory().create_poetry(fixture_dir("sample_project"))
package = poetry.package
assert package.name == "sample-project"
assert package.version.text == "1.2.3"
assert package.description == "Some description."
assert package.authors == ["SΓ©bastien Eustace <sebastien@eustace.io>"]
assert package.license is not None
assert package.license.id == "MIT"
for readme in package.readmes:
assert (
readme.relative_to(fixture_dir("sample_project")).as_posix() == "README.rst"
)
assert package.homepage == "https://python-poetry.org"
assert package.repository_url == "https://github.com/python-poetry/poetry"
assert package.keywords == ["packaging", "dependency", "poetry"]
assert package.python_versions == "~2.7 || ^3.6"
assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0"
dependencies = {}
for dep in package.requires:
dependencies[dep.name] = dep
cleo = dependencies[canonicalize_name("cleo")]
assert cleo.pretty_constraint == "^0.6"
assert not cleo.is_optional()
pendulum = dependencies[canonicalize_name("pendulum")]
assert pendulum.pretty_constraint == "branch 2.0"
assert pendulum.is_vcs()
assert isinstance(pendulum, VCSDependency)
assert pendulum.vcs == "git"
assert pendulum.branch == "2.0"
assert pendulum.source == "https://github.com/sdispater/pendulum.git"
assert pendulum.allows_prereleases()
requests = dependencies[canonicalize_name("requests")]
assert requests.pretty_constraint == "^2.18"
assert not requests.is_vcs()
assert not requests.allows_prereleases()
assert requests.is_optional()
assert requests.extras == frozenset(["security"])
pathlib2 = dependencies[canonicalize_name("pathlib2")]
assert pathlib2.pretty_constraint == "^2.2"
assert parse_constraint(pathlib2.python_versions) == parse_constraint("~2.7")
assert not pathlib2.is_optional()
demo = dependencies[canonicalize_name("demo")]
assert demo.is_file()
assert not demo.is_vcs()
assert demo.name == "demo"
assert demo.pretty_constraint == "*"
demo = dependencies[canonicalize_name("my-package")]
assert not demo.is_file()
assert demo.is_directory()
assert not demo.is_vcs()
assert demo.name == "my-package"
assert demo.pretty_constraint == "*"
simple_project = dependencies[canonicalize_name("simple-project")]
assert not simple_project.is_file()
assert simple_project.is_directory()
assert not simple_project.is_vcs()
assert simple_project.name == "simple-project"
assert simple_project.pretty_constraint == "*"
functools32 = dependencies[canonicalize_name("functools32")]
assert functools32.name == "functools32"
assert functools32.pretty_constraint == "^3.2.3"
assert (
str(functools32.marker)
== 'python_version ~= "2.7" and sys_platform == "win32" or python_version in'
' "3.4 3.5"'
)
assert "db" in package.extras
classifiers = package.classifiers
assert classifiers == [
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
assert package.all_classifiers == [
"License :: OSI Approved :: MIT License",
*(
f"Programming Language :: Python :: {version}"
for version in sorted(
Package.AVAILABLE_PYTHONS,
key=lambda x: tuple(map(int, x.split("."))),
)
if package.python_constraint.allows_any(
parse_constraint(version + ".*")
if len(version) == 1
else Version.parse(version)
)
),
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
@pytest.mark.parametrize(
("project",),
[
("simple_project_legacy",),
("project_with_extras",),
],
)
def test_create_pyproject_from_package(
project: str, fixture_dir: FixtureDirGetter
) -> None:
poetry = Factory().create_poetry(fixture_dir(project))
package = poetry.package
pyproject: dict[str, Any] = Factory.create_legacy_pyproject_from_package(package)
result = pyproject["tool"]["poetry"]
expected = poetry.pyproject.poetry_config
# Extras are normalized as they are read.
extras = expected.pop("extras", None)
if extras is not None:
normalized_extras = {
canonicalize_name(extra): dependencies
for extra, dependencies in extras.items()
}
expected["extras"] = normalized_extras
# packages do not support this at present
expected.pop("scripts", None)
# remove any empty sections
sections = list(expected.keys())
for section in sections:
if not expected[section]:
expected.pop(section)
assert not DeepDiff(expected, result)
def test_create_poetry_with_packages_and_includes(
fixture_dir: FixtureDirGetter,
) -> None:
poetry = Factory().create_poetry(fixture_dir("with-include"))
package = poetry.package
assert package.packages == [
{"include": "extra_dir/**/*.py", "format": ["sdist", "wheel"]},
{"include": "extra_dir/**/*.py", "format": ["sdist", "wheel"]},
{"include": "my_module.py", "format": ["sdist", "wheel"]},
{"include": "package_with_include", "format": ["sdist", "wheel"]},
{"include": "tests", "format": ["sdist"]},
{"include": "for_wheel_only", "format": ["wheel"]},
{"include": "src_package", "from": "src", "format": ["sdist", "wheel"]},
]
assert package.include in (
# with https://github.com/python-poetry/poetry-core/pull/773
[
{"path": "extra_dir/vcs_excluded.txt", "format": ["sdist", "wheel"]},
{"path": "notes.txt", "format": ["sdist"]},
],
# without https://github.com/python-poetry/poetry-core/pull/773
[
{"path": "extra_dir/vcs_excluded.txt", "format": ["sdist"]},
{"path": "notes.txt", "format": ["sdist"]},
],
)
def test_create_poetry_with_multi_constraints_dependency(
fixture_dir: FixtureDirGetter,
) -> None:
poetry = Factory().create_poetry(
fixture_dir("project_with_multi_constraints_dependency")
)
package = poetry.package
assert len(package.requires) == 2
def test_create_poetry_non_package_mode(fixture_dir: FixtureDirGetter) -> None:
poetry = Factory().create_poetry(fixture_dir("non_package_mode"))
assert not poetry.is_package_mode
def test_create_poetry_version_ok(fixture_dir: FixtureDirGetter) -> None:
io = BufferedIO()
Factory().create_poetry(fixture_dir("self_version_ok"), io=io)
assert io.fetch_output() == ""
assert io.fetch_error() == ""
def test_create_poetry_version_not_ok(fixture_dir: FixtureDirGetter) -> None:
with pytest.raises(PoetryError) as e:
Factory().create_poetry(fixture_dir("self_version_not_ok"))
assert (
str(e.value)
== f"This project requires Poetry <1.2, but you are using Poetry {__version__}"
)
def test_create_poetry_check_version_before_validation(
fixture_dir: FixtureDirGetter,
) -> None:
with pytest.raises(PoetryError) as e:
Factory().create_poetry(fixture_dir("self_version_not_ok_invalid_config"))
assert (
str(e.value)
== f"This project requires Poetry <1.2, but you are using Poetry {__version__}"
)
@pytest.mark.parametrize(
"project",
("with_primary_source_implicit", "with_primary_source_explicit"),
)
def test_poetry_with_primary_source(
project: str, fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
io = BufferedIO()
poetry = Factory().create_poetry(fixture_dir(project), io=io)
assert not poetry.pool.has_repository("PyPI")
assert poetry.pool.has_repository("foo")
assert poetry.pool.get_priority("foo") is Priority.PRIMARY
assert isinstance(poetry.pool.repository("foo"), LegacyRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"foo"}
def test_poetry_with_multiple_supplemental_sources(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
poetry = Factory().create_poetry(fixture_dir("with_multiple_supplemental_sources"))
assert poetry.pool.has_repository("PyPI")
assert isinstance(poetry.pool.repository("PyPI"), PyPiRepository)
assert poetry.pool.get_priority("PyPI") is Priority.PRIMARY
assert poetry.pool.has_repository("foo")
assert isinstance(poetry.pool.repository("foo"), LegacyRepository)
assert poetry.pool.has_repository("bar")
assert isinstance(poetry.pool.repository("bar"), LegacyRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"PyPI", "foo", "bar"}
def test_poetry_with_multiple_sources(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
poetry = Factory().create_poetry(fixture_dir("with_multiple_sources"))
assert not poetry.pool.has_repository("PyPI")
assert poetry.pool.has_repository("bar")
assert isinstance(poetry.pool.repository("bar"), LegacyRepository)
assert poetry.pool.has_repository("foo")
assert isinstance(poetry.pool.repository("foo"), LegacyRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"bar", "foo"}
def test_poetry_with_multiple_sources_pypi(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
io = BufferedIO()
poetry = Factory().create_poetry(fixture_dir("with_multiple_sources_pypi"), io=io)
assert len(poetry.pool.repositories) == 4
assert poetry.pool.has_repository("PyPI")
assert isinstance(poetry.pool.repository("PyPI"), PyPiRepository)
assert poetry.pool.get_priority("PyPI") is Priority.PRIMARY
# PyPI must be between bar and baz!
expected = ["bar", "PyPI", "baz", "foo"]
assert [repo.name for repo in poetry.pool.repositories] == expected
def test_poetry_with_no_default_source(fixture_dir: FixtureDirGetter) -> None:
poetry = Factory().create_poetry(fixture_dir("sample_project"))
assert poetry.pool.has_repository("PyPI")
assert poetry.pool.get_priority("PyPI") is Priority.PRIMARY
assert isinstance(poetry.pool.repository("PyPI"), PyPiRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"PyPI"}
def test_poetry_with_supplemental_source(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
io = BufferedIO()
poetry = Factory().create_poetry(fixture_dir("with_supplemental_source"), io=io)
assert poetry.pool.has_repository("PyPI")
assert poetry.pool.get_priority("PyPI") is Priority.PRIMARY
assert isinstance(poetry.pool.repository("PyPI"), PyPiRepository)
assert poetry.pool.has_repository("supplemental")
assert poetry.pool.get_priority("supplemental") is Priority.SUPPLEMENTAL
assert isinstance(poetry.pool.repository("supplemental"), LegacyRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"PyPI", "supplemental"}
assert io.fetch_error() == ""
def test_poetry_with_explicit_source(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
io = BufferedIO()
poetry = Factory().create_poetry(fixture_dir("with_explicit_source"), io=io)
assert len(poetry.pool.repositories) == 1
assert len(poetry.pool.all_repositories) == 2
assert poetry.pool.has_repository("PyPI")
assert poetry.pool.get_priority("PyPI") is Priority.PRIMARY
assert isinstance(poetry.pool.repository("PyPI"), PyPiRepository)
assert poetry.pool.has_repository("explicit")
assert isinstance(poetry.pool.repository("explicit"), LegacyRepository)
assert {repo.name for repo in poetry.pool.repositories} == {"PyPI"}
assert io.fetch_error() == ""
def test_poetry_with_explicit_pypi_and_other(
fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
io = BufferedIO()
poetry = Factory().create_poetry(fixture_dir("with_explicit_pypi_and_other"), io=io)
assert len(poetry.pool.repositories) == 1
assert len(poetry.pool.all_repositories) == 2
error = io.fetch_error()
assert error == ""
@pytest.mark.parametrize(
"project", ["with_explicit_pypi_no_other", "with_explicit_pypi_and_other_explicit"]
)
def test_poetry_with_pypi_explicit_only(
project: str, fixture_dir: FixtureDirGetter, with_simple_keyring: None
) -> None:
with pytest.raises(PoetryError) as e:
Factory().create_poetry(fixture_dir(project))
assert str(e.value) == "At least one source must not be configured as 'explicit'."
def test_validate(fixture_dir: FixtureDirGetter) -> None:
complete = TOMLFile(fixture_dir("complete.toml"))
pyproject: dict[str, Any] = complete.read()
assert Factory.validate(pyproject) == {"errors": [], "warnings": []}
def test_validate_fails(fixture_dir: FixtureDirGetter) -> None:
complete = TOMLFile(fixture_dir("complete.toml"))
pyproject: dict[str, Any] = complete.read()
pyproject["tool"]["poetry"]["this key is not in the schema"] = ""
pyproject["tool"]["poetry"]["source"] = {}
expected = [
"tool.poetry.source must be array",
(
"Additional properties are not allowed "
"('this key is not in the schema' was unexpected)"
),
]
assert Factory.validate(pyproject) == {"errors": expected, "warnings": []}
def test_create_poetry_fails_on_invalid_configuration(
fixture_dir: FixtureDirGetter,
) -> None:
with pytest.raises(RuntimeError) as e:
Factory().create_poetry(fixture_dir("invalid_pyproject_dep_name"))
expected = """\
The Poetry configuration is invalid:
- Project name (invalid) is same as one of its dependencies
"""
assert str(e.value) == expected
def test_create_poetry_fails_on_nameless_project(
fixture_dir: FixtureDirGetter,
) -> None:
with pytest.raises(RuntimeError) as e:
Factory().create_poetry(fixture_dir("nameless_pyproject"))
expected = """\
The Poetry configuration is invalid:
- Either [project.name] or [tool.poetry.name] is required in package mode.
"""
assert str(e.value) == expected
def test_create_poetry_with_local_config(fixture_dir: FixtureDirGetter) -> None:
poetry = Factory().create_poetry(fixture_dir("with_local_config"))
assert not poetry.config.get("virtualenvs.in-project")
assert not poetry.config.get("virtualenvs.create")
assert not poetry.config.get("virtualenvs.options.always-copy")
assert not poetry.config.get("virtualenvs.options.no-pip")
assert not poetry.config.get("virtualenvs.options.system-site-packages")
def test_create_poetry_with_plugins(
mocker: MockerFixture, fixture_dir: FixtureDirGetter
) -> None:
mock_metadata_entry_points(mocker, MyPlugin)
poetry = Factory().create_poetry(fixture_dir("sample_project"))
assert poetry.package.readmes == (Path("README.md"),)
@pytest.mark.parametrize(
("source", "expected"),
[
({}, "Missing [name] in source."),
({"name": "foo"}, "Missing [url] in source 'foo'."),
(
{"name": "PyPI", "url": "https://example.com"},
"The PyPI repository cannot be configured with a custom url.",
),
],
)
def test_create_package_source_invalid(
source: dict[str, str],
expected: str,
config: Config,
fixture_dir: FixtureDirGetter,
) -> None:
with pytest.raises(InvalidSourceError) as e:
Factory.create_package_source(source, config=config)
Factory().create_poetry(fixture_dir("with_source_pypi_url"))
assert str(e.value) == expected
| MyPlugin |
python | ray-project__ray | python/ray/tune/impl/tuner_internal.py | {
"start": 1453,
"end": 27258
} | class ____:
"""The real implementation behind external facing ``Tuner``.
The external facing ``Tuner`` multiplexes between local Tuner and remote Tuner
depending on whether in Ray client mode.
In Ray client mode, external ``Tuner`` wraps ``TunerInternal`` into a remote actor,
which is guaranteed to be placed on head node.
``TunerInternal`` can be constructed from fresh, in which case, ``trainable`` needs
to be provided, together with optional ``param_space``, ``tune_config`` and
``run_config``.
It can also be restored from a previous failed run (given ``restore_path``).
Args:
restore_path: The path from where the Tuner can be restored. If provided, None
of the rest args are needed.
resume_config: Resume config to configure which trials to continue.
trainable: The trainable to be tuned.
param_space: Search space of the tuning job.
One thing to note is that both preprocessor and dataset can be tuned here.
tune_config: Tuning algorithm specific configs.
Refer to ray.tune.tune_config.TuneConfig for more info.
run_config: Runtime configuration that is specific to individual trials.
If passed, this will overwrite the run config passed to the Trainer,
if applicable. Refer to ray.tune.RunConfig for more info.
"""
def __init__(
self,
restore_path: str = None,
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
resume_config: Optional[ResumeConfig] = None,
trainable: Optional[TrainableTypeOrTrainer] = None,
param_space: Optional[Dict[str, Any]] = None,
tune_config: Optional[TuneConfig] = None,
run_config: Optional[RunConfig] = None,
_tuner_kwargs: Optional[Dict] = None,
_entrypoint: AirEntrypoint = AirEntrypoint.TUNER,
):
from ray.train.trainer import BaseTrainer
if isinstance(trainable, BaseTrainer):
if _v2_migration_warnings_enabled():
_log_deprecation_warning(
"The Ray Train + Ray Tune integration has been reworked. "
"Passing a Trainer to the Tuner is deprecated and will be removed "
"in a future release. "
f"{V2_MIGRATION_GUIDE_MESSAGE}"
)
run_config = self._choose_run_config(
tuner_run_config=run_config,
trainer=trainable,
param_space=param_space,
)
self._tune_config = tune_config or TuneConfig()
self._run_config = copy.copy(run_config) or RunConfig()
self._entrypoint = _entrypoint
# Restore from Tuner checkpoint.
if restore_path:
self._restore_from_path_or_uri(
path_or_uri=restore_path,
trainable=trainable,
overwrite_param_space=param_space,
resume_config=resume_config,
storage_filesystem=storage_filesystem,
)
return
# Start from fresh
if not trainable:
raise TuneError("You need to provide a trainable to tune.")
if self._entrypoint == AirEntrypoint.TUNER and not isinstance(
self._run_config, ray.tune.RunConfig
):
if _v2_migration_warnings_enabled():
_log_deprecation_warning(
"The `RunConfig` class should be imported from `ray.tune` "
"when passing it to the Tuner. Please update your imports. "
f"{V2_MIGRATION_GUIDE_MESSAGE}"
)
self.trainable = trainable
assert self.converted_trainable
self._validate_trainable(self.converted_trainable)
self.param_space = param_space
self._resume_config = None
self._is_restored = False
self._tuner_kwargs = copy.deepcopy(_tuner_kwargs) or {}
self._experiment_analysis = None
self._run_config.name = (
self._run_config.name
or StorageContext.get_experiment_dir_name(self.converted_trainable)
)
# The storage context here is only used to access the resolved
# storage fs and experiment path, in order to avoid duplicating that logic.
# This is NOT the storage context object that gets passed to remote workers.
storage = StorageContext(
storage_path=self._run_config.storage_path,
experiment_dir_name=self._run_config.name,
storage_filesystem=self._run_config.storage_filesystem,
)
fs = storage.storage_filesystem
fs.create_dir(storage.experiment_fs_path)
with fs.open_output_stream(
Path(storage.experiment_fs_path, _TUNER_PKL).as_posix()
) as f:
f.write(pickle.dumps(self.__getstate__()))
def get_run_config(self) -> RunConfig:
return self._run_config
# For Jupyter output with Ray Client
def set_run_config_and_remote_string_queue(
self, run_config: RunConfig, string_queue: "Queue"
):
self._run_config = run_config
self._tuner_kwargs["_remote_string_queue"] = string_queue
def clear_remote_string_queue(self):
self._tuner_kwargs.pop("_remote_string_queue", None)
def _expected_utilization(self, cpus_per_trial, cpus_total):
num_samples = self._tune_config.num_samples
if num_samples < 0: # TODO: simplify this in Tune
num_samples = math.inf
concurrent_trials = self._tune_config.max_concurrent_trials or 0
if concurrent_trials < 1: # TODO: simplify this in Tune
concurrent_trials = math.inf
actual_concurrency = min(
(
(cpus_total // cpus_per_trial) if cpus_per_trial else 0,
num_samples,
concurrent_trials,
)
)
return (actual_concurrency * cpus_per_trial) / (cpus_total + 0.001)
def _validate_trainable(
self, trainable: TrainableType, required_trainable_name: Optional[str] = None
):
"""Determines whether or not the trainable is valid.
This includes checks on the serializability of the trainable, as well
asserting that the trainable name is as expected on restoration.
This trainable name validation is needed due to an implementation detail
where the trainable name (which is differently generated depending on
the trainable type) is saved in the Trial metadata and needs to match
upon restoration. This does not affect the typical path, since `Tuner.restore`
expects the exact same trainable (which will have the same name).
Raises:
ValueError: if the trainable name does not match or if the trainable
is not serializable.
"""
try:
pickle.dumps(trainable)
except TypeError as e:
sio = io.StringIO()
inspect_serializability(trainable, print_file=sio)
msg = (
"The provided trainable is not serializable, which is a requirement "
"since the trainable is serialized and deserialized when transferred "
"to remote workers. See below for a trace of the non-serializable "
"objects that were found in your trainable:\n"
f"{sio.getvalue()}"
)
raise TypeError(msg) from e
if not required_trainable_name:
return
trainable_name = Experiment.get_trainable_name(trainable)
if trainable_name != required_trainable_name:
raise ValueError(
"Invalid `trainable` input to `Tuner.restore()`. To fix this error, "
"pass in the same trainable that was used to initialize the Tuner. "
"Got a trainable with identifier "
f"'{trainable_name}' but expected '{required_trainable_name}'."
)
def _set_trainable_on_restore(
self, trainable: TrainableType, old_trainable_name: Optional[str]
):
from ray.train.base_trainer import BaseTrainer
self.trainable = trainable
assert self.converted_trainable
self._validate_trainable(
trainable=self.converted_trainable,
required_trainable_name=old_trainable_name,
)
if isinstance(self.trainable, BaseTrainer):
# Log a warning in case the user tries to modify the
# `RunConfig` from the Trainer
trainer: BaseTrainer = self.trainable
# Only log if the Trainer has a non-default RunConfig
if trainer.run_config != RunConfig():
logger.warning(
"The Tune experiment will restore using the original run's "
"`RunConfig`. If you made any changes to the `RunConfig` "
"within the Trainer you passed into `Tuner.restore`, "
"they will be ignored in the resumed run."
)
trainer.run_config = self._run_config
def _validate_param_space_on_restore(
self,
new_param_space: Dict[str, Any],
flattened_param_space_keys: Optional[List[str]],
):
"""Determines whether the (optionally) re-specified `param_space` is valid.
This method performs very loose validation on the new param_space to
prevent users from trying to specify new hyperparameters to tune over.
Raises:
ValueError: if not all keys match the original param_space.
"""
if flattened_param_space_keys is None:
# Backwards compatibility: skip validation
return
keys = sorted(flatten_dict(new_param_space).keys())
if keys != flattened_param_space_keys:
raise ValueError(
"Invalid `param_space` input to `Tuner.restore()`. To fix this error, "
"pass in the same `param_space` that was used to initialize the Tuner. "
"Only re-specify the `param_space` to refresh Ray object references "
"that no longer exist due to restoring from a new Ray cluster session. "
"It should not be used to introduce new hyperparameters to tune."
f"\n\nGot: {keys}\nExpected: {flattened_param_space_keys}"
)
def _set_param_space_on_restore(
self,
param_space: Optional[Dict[str, Any]],
flattened_param_space_keys: Optional[List[str]],
):
self.param_space = param_space
if self.param_space is not None:
# param_space = None -> use the original param_space
self._validate_param_space_on_restore(
new_param_space=self.param_space,
flattened_param_space_keys=flattened_param_space_keys,
)
def _load_tuner_state(
self, tuner_state: Dict[str, Any]
) -> Tuple[Optional[str], Optional[List[str]]]:
"""Loads Tuner state from the previously saved `tuner.pkl`.
Args:
tuner_pkl_path: pathlib.Path of the `tuner.pkl` file saved during the
original Tuner initialization.
Returns:
tuple: of `(old_trainable_name, flattened_param_space_keys)` used for
validating the re-specified `trainable` and `param_space`.
"""
# NOTE: These are magic keys used for validating restore args.
old_trainable_name = tuner_state.pop("__trainable_name", None)
flattened_param_space_keys = tuner_state.pop(
"__flattened_param_space_keys", None
)
self.__setstate__(tuner_state)
return old_trainable_name, flattened_param_space_keys
def _restore_from_path_or_uri(
self,
path_or_uri: str,
trainable: TrainableTypeOrTrainer,
overwrite_param_space: Optional[Dict[str, Any]],
resume_config: ResumeConfig,
storage_filesystem: Optional[pyarrow.fs.FileSystem],
):
fs, fs_path = get_fs_and_path(path_or_uri, storage_filesystem)
with fs.open_input_file(Path(fs_path, _TUNER_PKL).as_posix()) as f:
tuner_state = pickle.loads(f.readall())
old_trainable_name, flattened_param_space_keys = self._load_tuner_state(
tuner_state
)
# Perform validation and set the re-specified `trainable` and `param_space`
self._set_trainable_on_restore(
trainable=trainable, old_trainable_name=old_trainable_name
)
self._set_param_space_on_restore(
param_space=overwrite_param_space,
flattened_param_space_keys=flattened_param_space_keys,
)
# Update RunConfig to reflect changes in the experiment directory
path_or_uri_obj = URI(path_or_uri)
# Infer the `storage_path` and run `name` of the restored run using the
# experiment directory.
# Ex: ~/ray_results/exp_name -> ~/ray_results, exp_name
# Ex: s3://bucket/exp_name -> s3://bucket, exp_name
self._run_config.name = path_or_uri_obj.name
self._run_config.storage_path = str(path_or_uri_obj.parent)
# Update the storage_filesystem with the one passed in on restoration, if any.
self._run_config.storage_filesystem = storage_filesystem
# Load the experiment results at the point where it left off.
try:
self._experiment_analysis = ExperimentAnalysis(
experiment_checkpoint_path=path_or_uri,
default_metric=self._tune_config.metric,
default_mode=self._tune_config.mode,
storage_filesystem=storage_filesystem,
)
except Exception:
self._experiment_analysis = None
self._resume_config = resume_config
self._is_restored = True
def _choose_run_config(
self,
tuner_run_config: Optional[RunConfig],
trainer: "BaseTrainer",
param_space: Optional[Dict[str, Any]],
) -> RunConfig:
"""Chooses which `RunConfig` to use when multiple can be passed in
through a Trainer or the Tuner itself.
Args:
tuner_run_config: The run config passed into the Tuner constructor.
trainer: The Trainer instance to use with Tune, which may have
a RunConfig specified by the user.
param_space: The param space passed to the Tuner.
Raises:
ValueError: if the `run_config` is specified as a hyperparameter.
"""
if param_space and "run_config" in param_space:
raise ValueError(
"`RunConfig` cannot be tuned as part of the `param_space`! "
"Move the run config to be a parameter of the `Tuner`: "
"Tuner(..., run_config=RunConfig(...))"
)
# Both Tuner RunConfig + Trainer RunConfig --> prefer Tuner RunConfig
if tuner_run_config and trainer.run_config != ray.train.RunConfig():
logger.info(
"A `RunConfig` was passed to both the `Tuner` and the "
f"`{trainer.__class__.__name__}`. The run config passed to "
"the `Tuner` is the one that will be used."
)
return tuner_run_config
# No Tuner RunConfig -> pass the Trainer config through
# This returns either a user-specified config, or the default RunConfig
# if nothing was provided to both the Trainer or Tuner.
if not tuner_run_config:
return trainer.run_config
# Tuner RunConfig + No Trainer RunConfig --> Use the Tuner config
return tuner_run_config
def _process_scaling_config(self) -> None:
"""Converts ``self._param_space["scaling_config"]`` to a dict.
The dict is converted back to a dataclass by the Trainer, after the
Tune search specification is resolved.
"""
# TODO: introduce `ray.tune.sample.TuneableDataclass` and allow Tune to
# natively resolve specs with dataclasses.
scaling_config = self._param_space.get("scaling_config")
if not isinstance(scaling_config, ray.train.ScalingConfig):
return
self._param_space["scaling_config"] = scaling_config.__dict__.copy()
@property
def trainable(self) -> TrainableTypeOrTrainer:
return self._trainable
@property
def converted_trainable(self) -> TrainableType:
return self._converted_trainable
@trainable.setter
def trainable(self, trainable: TrainableTypeOrTrainer):
self._trainable = trainable
self._converted_trainable = self._convert_trainable(trainable)
@property
def param_space(self) -> Optional[Dict[str, Any]]:
return self._param_space
@param_space.setter
def param_space(self, param_space: Optional[Dict[str, Any]]):
# Handle any configs that adhere to the `to_dict` interface.
# Ex: AlgorithmConfig from RLlib
if isinstance(param_space, _Config):
param_space = param_space.to_dict()
if not isinstance(param_space, dict) and param_space is not None:
raise ValueError(
"The `param_space` passed to the `Tuner` must be a dict. "
f"Got '{type(param_space)}' instead."
)
self._param_space = param_space
if param_space:
self._process_scaling_config()
def _convert_trainable(self, trainable: TrainableTypeOrTrainer) -> TrainableType:
"""Converts a Trainer to a Tune trainable and saves the converted
trainable. If not using a Trainer, this leaves the trainable as is."""
from ray.train.trainer import BaseTrainer
return (
trainable.as_trainable()
if isinstance(trainable, BaseTrainer)
else trainable
)
def fit(self) -> ResultGrid:
trainable = self.converted_trainable
param_space = copy.deepcopy(self.param_space)
if not self._is_restored:
analysis = self._fit_internal(trainable, param_space)
else:
analysis = self._fit_resume(trainable, param_space)
self._experiment_analysis = analysis
return ResultGrid(self._experiment_analysis)
def get_results(self) -> ResultGrid:
if not self._experiment_analysis:
raise RuntimeError(
"Can't return results as experiment has not been run, yet. "
"Call `Tuner.fit()` to run the experiment first."
)
return ResultGrid(self._experiment_analysis)
def _get_tune_run_arguments(self, trainable: TrainableType) -> Dict[str, Any]:
"""Get tune.run arguments common for both new and resumed runs."""
# Avoid overwriting the originally configured checkpoint config.
checkpoint_config = copy.deepcopy(self._run_config.checkpoint_config)
if checkpoint_config.checkpoint_frequency:
# Function trainables (and thus most of our trainers) usually don't handle
# this argument.
handle_checkpoint_freq = getattr(
trainable, "_handles_checkpoint_freq", None
)
if handle_checkpoint_freq is False:
# If we specifically know this trainable doesn't support the
# argument, raise an error
raise ValueError(
"You passed `checkpoint_frequency="
f"{checkpoint_config.checkpoint_frequency}` to your "
"CheckpointConfig, but this trainer does not support "
"this argument. If you passed in a Trainer that takes in a "
"custom training loop, you will need to "
"report a checkpoint every `checkpoint_frequency` iterations "
"within your training loop using "
"`ray.tune.report(metrics=..., checkpoint=...)` "
"to get this behavior."
)
elif handle_checkpoint_freq is True:
# If we specifically support it, it's handled in the training loop,
# so we disable tune's bookkeeping.
checkpoint_config.checkpoint_frequency = 0
# Otherwise, the trainable is not a Trainer and we just keep the
# user-supplied value.
# Function trainables will raise a runtime error later if set > 0
if checkpoint_config.checkpoint_at_end is not None:
# Again, function trainables usually don't handle this argument.
handle_cp_at_end = getattr(trainable, "_handles_checkpoint_at_end", None)
if handle_cp_at_end is False:
# If we specifically know we don't support it, raise an error.
raise ValueError(
"You passed `checkpoint_at_end="
f"{checkpoint_config.checkpoint_at_end}` "
"to your CheckpointConfig, but this trainer does not support "
"this argument. If you passed in a Trainer that takes in a "
"custom training loop, you should include one last call to "
"`ray.tune.report(metrics=..., checkpoint=...)` "
"at the end of your training loop to get this behavior."
)
elif handle_cp_at_end is True:
# If we specifically support it, it's handled in the training loop,
# so we disable tune's internal bookkeeping.
checkpoint_config.checkpoint_at_end = False
# If this is a user-defined trainable, just keep the value
# Function trainables will raise a runtime error later if set to True
else:
# Set default to False for function trainables and True for everything else
if is_function_trainable(trainable):
checkpoint_config.checkpoint_at_end = False
else:
checkpoint_config.checkpoint_at_end = True
return dict(
storage_path=self._run_config.storage_path,
storage_filesystem=self._run_config.storage_filesystem,
name=self._run_config.name,
mode=self._tune_config.mode,
metric=self._tune_config.metric,
callbacks=self._run_config.callbacks,
sync_config=self._run_config.sync_config,
stop=self._run_config.stop,
max_failures=self._run_config.failure_config.max_failures,
checkpoint_config=checkpoint_config,
raise_on_failed_trial=False,
fail_fast=(self._run_config.failure_config.fail_fast),
progress_reporter=self._run_config.progress_reporter,
verbose=self._run_config.verbose,
reuse_actors=self._tune_config.reuse_actors,
max_concurrent_trials=self._tune_config.max_concurrent_trials,
time_budget_s=self._tune_config.time_budget_s,
trial_name_creator=self._tune_config.trial_name_creator,
trial_dirname_creator=self._tune_config.trial_dirname_creator,
_entrypoint=self._entrypoint,
# Deprecated
chdir_to_trial_dir=self._tune_config.chdir_to_trial_dir,
)
def _fit_internal(
self, trainable: TrainableType, param_space: Optional[Dict[str, Any]]
) -> ExperimentAnalysis:
"""Fitting for a fresh Tuner."""
args = {
**self._get_tune_run_arguments(trainable),
**dict(
run_or_experiment=trainable,
config=param_space,
num_samples=self._tune_config.num_samples,
search_alg=self._tune_config.search_alg,
scheduler=self._tune_config.scheduler,
log_to_file=self._run_config.log_to_file,
),
**self._tuner_kwargs,
}
analysis = run(
**args,
)
self.clear_remote_string_queue()
return analysis
def _fit_resume(
self, trainable: TrainableType, param_space: Optional[Dict[str, Any]]
) -> ExperimentAnalysis:
"""Fitting for a restored Tuner."""
assert self._resume_config
args = {
**self._get_tune_run_arguments(trainable),
**dict(
run_or_experiment=trainable,
config=param_space,
resume_config=self._resume_config,
search_alg=self._tune_config.search_alg,
scheduler=self._tune_config.scheduler,
),
**self._tuner_kwargs,
}
analysis = run(**args)
self.clear_remote_string_queue()
return analysis
def __getstate__(self):
state = self.__dict__.copy()
state["_tuner_kwargs"] = state["_tuner_kwargs"].copy()
state["_tuner_kwargs"].pop("_remote_string_queue", None)
state.pop(_TRAINABLE_KEY, None)
trainable = state.pop(_CONVERTED_TRAINABLE_KEY, None)
param_space = state.pop(_PARAM_SPACE_KEY, None)
state.pop(_EXPERIMENT_ANALYSIS_KEY, None)
state["__trainable_name"] = (
Experiment.get_trainable_name(trainable) if trainable else None
)
state["__flattened_param_space_keys"] = (
sorted(flatten_dict(param_space).keys())
if param_space is not None
else None
)
return state
def __setstate__(self, state):
# Make sure the magic metadata gets removed first.
state.pop("__flattened_param_space_keys", None)
state.pop("__trainable_name", None)
self.__dict__.update(state)
| TunerInternal |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 3094,
"end": 3188
} | class ____(MilvusException):
"""Raise when partitionkey are invalid"""
| PartitionKeyException |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 341,
"end": 490
} | class ____(Exception):
def __init__(self, message) -> None:
self.message = message
super().__init__(message)
| GreatExpectationsError |
python | mozilla__bleach | bleach/_vendor/html5lib/treebuilders/etree_lxml.py | {
"start": 1037,
"end": 1208
} | class ____(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
| DocumentType |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataproc.py | {
"start": 18083,
"end": 20751
} | class ____(DataprocBaseTrigger):
"""
DataprocCreateBatchTrigger run on the trigger worker to perform create Build operation.
:param batch_id: The ID of the build.
:param project_id: Google Cloud Project where the job is running
:param region: The Cloud Dataproc region in which to handle the request.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param polling_interval_seconds: polling period in seconds to check for the status
"""
def __init__(self, batch_id: str, **kwargs):
super().__init__(**kwargs)
self.batch_id = batch_id
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize DataprocBatchTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocBatchTrigger",
{
"batch_id": self.batch_id,
"project_id": self.project_id,
"region": self.region,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self):
while True:
batch = await self.get_async_hook().get_batch(
project_id=self.project_id, region=self.region, batch_id=self.batch_id
)
state = batch.state
if state in (Batch.State.FAILED, Batch.State.SUCCEEDED, Batch.State.CANCELLED):
break
self.log.info("Current state is %s", Batch.State(state).name)
self.log.info("Sleeping for %s seconds.", self.polling_interval_seconds)
await asyncio.sleep(self.polling_interval_seconds)
yield TriggerEvent(
{
"batch_id": self.batch_id,
"batch_state": Batch.State(state).name,
"batch_state_message": batch.state_message,
}
)
| DataprocBatchTrigger |
python | pytorch__pytorch | test/distributed/test_c10d_common.py | {
"start": 6747,
"end": 7161
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
| Net |
python | django__django | tests/custom_lookups/tests.py | {
"start": 1518,
"end": 1586
} | class ____(Div3Transform):
bilateral = True
| Div3BilateralTransform |
python | pypa__warehouse | warehouse/packaging/services.py | {
"start": 4060,
"end": 4279
} | class ____(GenericLocalBlobStorage):
@classmethod
def create_service(cls, context, request):
return cls(request.registry.settings["archive_files.path"])
@implementer(ISimpleStorage)
| LocalArchiveFileStorage |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/input.py | {
"start": 20510,
"end": 21007
} | class ____(NamedTuple("_GraphIn", [("description", PublicAttr[Optional[str]])])):
"""Represents information about an input that a graph maps.
Args:
description (Optional[str]): Human-readable description of the input.
"""
def __new__(cls, description: Optional[str] = None):
return super().__new__(cls, description=description)
def to_definition(self, name: str) -> InputDefinition:
return InputDefinition(name=name, description=self.description)
| GraphIn |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 28208,
"end": 28848
} | class ____(OutEdgeDataView):
"""An EdgeDataView class for outward edges of DiGraph; See EdgeDataView"""
__slots__ = ()
def __iter__(self):
return (
self._report(nbr, n, dd)
for n, nbrs in self._nodes_nbrs()
for nbr, dd in nbrs.items()
)
def __contains__(self, e):
u, v = e[:2]
if self._nbunch is not None and v not in self._nbunch:
return False # this edge doesn't end in nbunch
try:
ddict = self._adjdict[v][u]
except KeyError:
return False
return e == self._report(u, v, ddict)
| InEdgeDataView |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 1387,
"end": 1454
} | class ____(range): # [inherit-non-class]
pass
| NotInheritableRange |
python | huggingface__transformers | tests/optimization/test_optimization.py | {
"start": 1963,
"end": 3745
} | class ____(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_adam_w(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = torch.optim.AdamW(params=[w], lr=2e-1, weight_decay=0.0)
for _ in range(100):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
def test_adafactor(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = Adafactor(
params=[w],
lr=1e-2,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
relative_step=False,
scale_parameter=False,
warmup_init=False,
)
for _ in range(1000):
loss = criterion(w, target)
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
| OptimizationTest |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 20725,
"end": 22589
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "rΓ©tt Γ ΓΎessu"
assert self.locale._format_timeframe("second", -1) == "sekΓΊndu"
assert self.locale._format_timeframe("second", 1) == "sekΓΊndu"
assert self.locale._format_timeframe("minute", -1) == "einni mΓnΓΊtu"
assert self.locale._format_timeframe("minute", 1) == "eina mΓnΓΊtu"
assert self.locale._format_timeframe("minutes", -2) == "2 mΓnΓΊtum"
assert self.locale._format_timeframe("minutes", 2) == "2 mΓnΓΊtur"
assert self.locale._format_timeframe("hour", -1) == "einum tΓma"
assert self.locale._format_timeframe("hour", 1) == "einn tΓma"
assert self.locale._format_timeframe("hours", -2) == "2 tΓmum"
assert self.locale._format_timeframe("hours", 2) == "2 tΓma"
assert self.locale._format_timeframe("day", -1) == "einum degi"
assert self.locale._format_timeframe("day", 1) == "einn dag"
assert self.locale._format_timeframe("days", -2) == "2 dΓΆgum"
assert self.locale._format_timeframe("days", 2) == "2 daga"
assert self.locale._format_timeframe("month", -1) == "einum mΓ‘nuΓ°i"
assert self.locale._format_timeframe("month", 1) == "einn mΓ‘nuΓ°"
assert self.locale._format_timeframe("months", -2) == "2 mΓ‘nuΓ°um"
assert self.locale._format_timeframe("months", 2) == "2 mΓ‘nuΓ°i"
assert self.locale._format_timeframe("year", -1) == "einu Γ‘ri"
assert self.locale._format_timeframe("year", 1) == "eitt Γ‘r"
assert self.locale._format_timeframe("years", -2) == "2 Γ‘rum"
assert self.locale._format_timeframe("years", 2) == "2 Γ‘r"
with pytest.raises(ValueError):
self.locale._format_timeframe("years", 0)
@pytest.mark.usefixtures("lang_locale")
| TestIcelandicLocale |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/typehints.py | {
"start": 1872,
"end": 2140
} | class ____:
"""Class docstring."""
def __init__(self, x: int, *args: int, **kwargs: int) -> None:
"""Init docstring.
:param x: Some integer
:param args: Some integer
:param kwargs: Some integer
"""
| _ClassWithDocumentedInit |
python | huggingface__transformers | src/transformers/models/vitdet/modeling_vitdet.py | {
"start": 24838,
"end": 27237
} | class ____(VitDetPreTrainedModel):
def __init__(self, config: VitDetConfig):
super().__init__(config)
self.config = config
self.embeddings = VitDetEmbeddings(config)
self.encoder = VitDetEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> VitDetEmbeddings:
return self.embeddings.projection
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Examples:
```python
>>> from transformers import VitDetConfig, VitDetModel
>>> import torch
>>> config = VitDetConfig()
>>> model = VitDetModel(config)
>>> pixel_values = torch.randn(1, 3, 224, 224)
>>> with torch.no_grad():
... outputs = model(pixel_values)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 768, 14, 14]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
ViTDet backbone, to be used with frameworks like Mask R-CNN.
"""
)
| VitDetModel |
python | ray-project__ray | rllib/env/tests/test_single_agent_episode.py | {
"start": 1732,
"end": 34316
} | class ____(unittest.TestCase):
def test_init(self):
"""Tests initialization of `SingleAgentEpisode`.
Three cases are tested:
1. Empty episode with default starting timestep.
2. Empty episode starting at `t_started=10`. This is only interesting
for ongoing episodes, where we do not want to carry on the stale
entries from the last rollout.
3. Initialization with pre-collected data.
"""
# Create empty episode.
episode = SingleAgentEpisode()
# Empty episode should have a start point and count of zero.
self.assertTrue(episode.t_started == episode.t == 0)
# Create an episode with a specific starting point.
episode = SingleAgentEpisode(t_started=10)
self.assertTrue(episode.t == episode.t_started == 10)
episode = self._create_episode(num_data=100)
# The starting point and count should now be at `len(observations) - 1`.
self.assertTrue(len(episode) == 100)
self.assertTrue(episode.t == 100)
self.assertTrue(episode.t_started == 0)
# Build the same episode, but with a 10 ts lookback buffer.
episode = self._create_episode(num_data=100, len_lookback_buffer=10)
# The lookback buffer now takes 10 ts and the length of the episode is only 90.
self.assertTrue(len(episode) == 90)
# `t_started` is 0 by default.
self.assertTrue(episode.t_started == 0)
self.assertTrue(episode.t == 90)
self.assertTrue(len(episode.rewards) == 90)
self.assertTrue(len(episode.rewards.data) == 100)
# Build the same episode, but with a 10 ts lookback buffer AND a specific
# `t_started`.
episode = self._create_episode(
num_data=100, len_lookback_buffer=10, t_started=50
)
# The lookback buffer now takes 10 ts and the length of the episode is only 90.
self.assertTrue(len(episode) == 90)
self.assertTrue(episode.t_started == 50)
self.assertTrue(episode.t == 140)
self.assertTrue(len(episode.rewards) == 90)
self.assertTrue(len(episode.rewards.data) == 100)
def test_add_env_reset(self):
"""Tests adding initial observations and infos.
This test ensures that when initial observation and info are provided
the length of the lists are correct and the timestep is still at zero,
as the agent has not stepped, yet.
"""
# Create empty episode.
episode = SingleAgentEpisode()
# Create environment.
env = gym.make("CartPole-v1")
# Add initial observations.
obs, info = env.reset()
episode.add_env_reset(observation=obs, infos=info)
# Assert that the observations are added to their list.
self.assertTrue(len(episode.observations) == 1)
# Assert that the infos are added to their list.
self.assertTrue(len(episode.infos) == 1)
# Assert that the timesteps are still at zero as we have not stepped, yet.
self.assertTrue(episode.t == episode.t_started == 0)
def test_add_env_step(self):
"""Tests if adding timestep data to a `SingleAgentEpisode` works.
Adding timestep data is the central part of collecting episode
dara. Here it is tested if adding to the internal data lists
works as intended and the timestep is increased during each step.
"""
# Create an empty episode and add initial observations.
episode = SingleAgentEpisode(len_lookback_buffer=10)
env = gym.make("CartPole-v1")
# Set the random seed (otherwise the episode will terminate at
# different points in each test run).
obs, info = env.reset(seed=0)
episode.add_env_reset(observation=obs, infos=info)
# Sample 100 timesteps and add them to the episode.
terminated = truncated = False
for i in range(100):
action = env.action_space.sample()
obs, reward, terminated, truncated, info = env.step(action)
episode.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
extra_model_outputs={"extra": np.random.random(1)},
)
if terminated or truncated:
break
# Assert that the episode timestep is at 100.
self.assertTrue(episode.t == len(episode.observations) - 1 == i + 1)
# Assert that `t_started` stayed at zero.
self.assertTrue(episode.t_started == 0)
# Assert that all lists have the proper lengths.
self.assertTrue(
len(episode.actions)
== len(episode.rewards)
== len(episode.observations) - 1
== len(episode.infos) - 1
== i + 1
)
# Assert that the flags are set correctly.
self.assertTrue(episode.is_terminated == terminated)
self.assertTrue(episode.is_truncated == truncated)
self.assertTrue(episode.is_done == terminated or truncated)
def test_getters(self):
"""Tests whether the SingleAgentEpisode's getter methods work as expected."""
# Create a simple episode.
episode = SingleAgentEpisode(
observations=[0, 1, 2, 3, 4, 5, 6],
actions=[0, 1, 2, 3, 4, 5],
rewards=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
len_lookback_buffer=0,
)
check(episode.get_observations(0), 0)
check(episode.get_observations([0, 1]), [0, 1])
check(episode.get_observations([-1]), [6])
check(episode.get_observations(-2), 5)
check(episode.get_observations(slice(1, 3)), [1, 2])
check(episode.get_observations(slice(-3, None)), [4, 5, 6])
check(episode.get_actions(0), 0)
check(episode.get_actions([0, 1]), [0, 1])
check(episode.get_actions([-1]), [5])
check(episode.get_actions(-2), 4)
check(episode.get_actions(slice(1, 3)), [1, 2])
check(episode.get_actions(slice(-3, None)), [3, 4, 5])
check(episode.get_rewards(0), 0.0)
check(episode.get_rewards([0, 1]), [0.0, 0.1])
check(episode.get_rewards([-1]), [0.5])
check(episode.get_rewards(-2), 0.4)
check(episode.get_rewards(slice(1, 3)), [0.1, 0.2])
check(episode.get_rewards(slice(-3, None)), [0.3, 0.4, 0.5])
def test_cut(self):
"""Tests creation of a successor of a `SingleAgentEpisode` via the `cut` API.
This test makes sure that when creating a successor the successor's
data is coherent with the episode that should be succeeded.
Observation and info are available before each timestep; therefore
these data is carried over to the successor.
"""
# Create an empty episode.
episode_1 = SingleAgentEpisode()
# Create an environment.
env = TestEnv()
# Add initial observation.
init_obs, init_info = env.reset()
episode_1.add_env_reset(observation=init_obs, infos=init_info)
# Sample 100 steps.
for i in range(100):
action = i
obs, reward, terminated, truncated, info = env.step(action)
episode_1.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
extra_model_outputs={"extra": np.random.random(1)},
)
# Assert that the episode has indeed 100 timesteps.
self.assertTrue(episode_1.t == 100)
# Create a successor.
episode_2 = episode_1.cut()
# Assert that it has the same id.
self.assertEqual(episode_1.id_, episode_2.id_)
# Assert that the timestep starts at the end of the last episode.
self.assertTrue(episode_1.t == episode_2.t == episode_2.t_started)
# Assert that the last observation of `episode_1` is the first of
# `episode_2`.
self.assertTrue(episode_1.observations[-1] == episode_2.observations[0])
# Assert that the last info of `episode_1` is the first of episode_2`.
self.assertTrue(episode_1.infos[-1] == episode_2.infos[0])
# Test immutability.
action = 100
obs, reward, terminated, truncated, info = env.step(action)
episode_2.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
extra_model_outputs={"extra": np.random.random(1)},
)
# Assert that this does not change also the predecessor's data.
self.assertFalse(len(episode_1.observations) == len(episode_2.observations))
def test_slice(self):
"""Tests whether slicing with the []-operator works as expected."""
# Generate a simple single-agent episode.
observations = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
actions = observations[:-1]
rewards = [o / 10 for o in observations[:-1]]
episode = SingleAgentEpisode(
observations=observations,
actions=actions,
rewards=rewards,
len_lookback_buffer=0,
)
check(len(episode), 9)
# Slice the episode in different ways and check results.
for s in [
slice(None, None, None),
slice(-100, None, None),
slice(None, 1000, None),
slice(-1000, 1000, None),
]:
slice_ = episode[s]
check(len(slice_), len(episode))
check(slice_.observations, observations)
check(slice_.actions, observations[:-1])
check(slice_.rewards, [o / 10 for o in observations[:-1]])
check(slice_.is_done, False)
slice_ = episode[-100:]
check(len(slice_), len(episode))
check(slice_.observations, observations)
check(slice_.actions, observations[:-1])
check(slice_.rewards, [o / 10 for o in observations[:-1]])
check(slice_.is_done, False)
slice_ = episode[2:]
check(len(slice_), 7)
check(slice_.observations, [2, 3, 4, 5, 6, 7, 8, 9])
check(slice_.actions, [2, 3, 4, 5, 6, 7, 8])
check(slice_.rewards, [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[:1]
check(len(slice_), 1)
check(slice_.observations, [0, 1])
check(slice_.actions, [0])
check(slice_.rewards, [0.0])
check(slice_.is_done, False)
slice_ = episode[:3]
check(len(slice_), 3)
check(slice_.observations, [0, 1, 2, 3])
check(slice_.actions, [0, 1, 2])
check(slice_.rewards, [0.0, 0.1, 0.2])
check(slice_.is_done, False)
slice_ = episode[:-4]
check(len(slice_), 5)
check(slice_.observations, [0, 1, 2, 3, 4, 5])
check(slice_.actions, [0, 1, 2, 3, 4])
check(slice_.rewards, [0.0, 0.1, 0.2, 0.3, 0.4])
check(slice_.is_done, False)
slice_ = episode[-2:]
check(len(slice_), 2)
check(slice_.observations, [7, 8, 9])
check(slice_.actions, [7, 8])
check(slice_.rewards, [0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-3:]
check(len(slice_), 3)
check(slice_.observations, [6, 7, 8, 9])
check(slice_.actions, [6, 7, 8])
check(slice_.rewards, [0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-5:]
check(len(slice_), 5)
check(slice_.observations, [4, 5, 6, 7, 8, 9])
check(slice_.actions, [4, 5, 6, 7, 8])
check(slice_.rewards, [0.4, 0.5, 0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-4:-2]
check(len(slice_), 2)
check(slice_.observations, [5, 6, 7])
check(slice_.actions, [5, 6])
check(slice_.rewards, [0.5, 0.6])
check(slice_.is_done, False)
slice_ = episode[-4:6]
check(len(slice_), 1)
check(slice_.observations, [5, 6])
check(slice_.actions, [5])
check(slice_.rewards, [0.5])
check(slice_.is_done, False)
slice_ = episode[1:3]
check(len(slice_), 2)
check(slice_.observations, [1, 2, 3])
check(slice_.actions, [1, 2])
check(slice_.rewards, [0.1, 0.2])
check(slice_.is_done, False)
# Generate a single-agent episode with lookback.
episode = SingleAgentEpisode(
observations=observations,
actions=actions,
rewards=rewards,
len_lookback_buffer=4, # some data is in lookback buffer
)
check(len(episode), 5)
# Slice the episode in different ways and check results.
for s in [
slice(None, None, None),
slice(-100, None, None),
slice(None, 1000, None),
slice(-1000, 1000, None),
]:
slice_ = episode[s]
check(len(slice_), len(episode))
check(slice_.observations, [4, 5, 6, 7, 8, 9])
check(slice_.actions, [4, 5, 6, 7, 8])
check(slice_.rewards, [0.4, 0.5, 0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[2:]
check(len(slice_), 3)
check(slice_.observations, [6, 7, 8, 9])
check(slice_.actions, [6, 7, 8])
check(slice_.rewards, [0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[:1]
check(len(slice_), 1)
check(slice_.observations, [4, 5])
check(slice_.actions, [4])
check(slice_.rewards, [0.4])
check(slice_.is_done, False)
slice_ = episode[:3]
check(len(slice_), 3)
check(slice_.observations, [4, 5, 6, 7])
check(slice_.actions, [4, 5, 6])
check(slice_.rewards, [0.4, 0.5, 0.6])
check(slice_.is_done, False)
slice_ = episode[:-4]
check(len(slice_), 1)
check(slice_.observations, [4, 5])
check(slice_.actions, [4])
check(slice_.rewards, [0.4])
check(slice_.is_done, False)
slice_ = episode[-2:]
check(len(slice_), 2)
check(slice_.observations, [7, 8, 9])
check(slice_.actions, [7, 8])
check(slice_.rewards, [0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-3:]
check(len(slice_), 3)
check(slice_.observations, [6, 7, 8, 9])
check(slice_.actions, [6, 7, 8])
check(slice_.rewards, [0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-5:]
check(len(slice_), 5)
check(slice_.observations, [4, 5, 6, 7, 8, 9])
check(slice_.actions, [4, 5, 6, 7, 8])
check(slice_.rewards, [0.4, 0.5, 0.6, 0.7, 0.8])
check(slice_.is_done, False)
slice_ = episode[-4:-2]
check(len(slice_), 2)
check(slice_.observations, [5, 6, 7])
check(slice_.actions, [5, 6])
check(slice_.rewards, [0.5, 0.6])
check(slice_.is_done, False)
slice_ = episode[-4:2]
check(len(slice_), 1)
check(slice_.observations, [5, 6])
check(slice_.actions, [5])
check(slice_.rewards, [0.5])
check(slice_.is_done, False)
slice_ = episode[1:3]
check(len(slice_), 2)
check(slice_.observations, [5, 6, 7])
check(slice_.actions, [5, 6])
check(slice_.rewards, [0.5, 0.6])
check(slice_.is_done, False)
# Even split (50/50).
episode = self._create_episode(100)
self.assertTrue(episode.t == 100 and episode.t_started == 0)
# Convert to numpy before splitting.
episode.to_numpy()
# Create two 50/50 episode chunks.
e1 = episode[:50]
self.assertTrue(e1.is_numpy)
e2 = episode.slice(slice(50, None))
self.assertTrue(e2.is_numpy)
# Make sure, `e1` and `e2` make sense.
self.assertTrue(len(e1) == 50)
self.assertTrue(len(e2) == 50)
self.assertTrue(e1.id_ == e2.id_)
self.assertTrue(e1.t_started == 0)
self.assertTrue(e1.t == 50)
self.assertTrue(e2.t_started == 50)
self.assertTrue(e2.t == 100)
# Make sure the chunks are not identical, but last obs of `e1` matches
# last obs of `e2`.
check(e1.get_observations(-1), e2.get_observations(0))
check(e1.observations[4], e2.observations[4], false=True)
check(e1.observations[10], e2.observations[10], false=True)
# Uneven split (33/66).
episode = self._create_episode(99)
self.assertTrue(episode.t == 99 and episode.t_started == 0)
# Convert to numpy before splitting.
episode.to_numpy()
# Create two 50/50 episode chunks.
e1 = episode.slice(slice(None, 33))
self.assertTrue(e1.is_numpy)
e2 = episode[33:]
self.assertTrue(e2.is_numpy)
# Make sure, `e1` and `e2` chunk make sense.
self.assertTrue(len(e1) == 33)
self.assertTrue(len(e2) == 66)
self.assertTrue(e1.id_ == e2.id_)
self.assertTrue(e1.t_started == 0)
self.assertTrue(e1.t == 33)
self.assertTrue(e2.t_started == 33)
self.assertTrue(e2.t == 99)
# Make sure the chunks are not identical, but last obs of `e1` matches
# last obs of `e2`.
check(e1.get_observations(-1), e2.get_observations(0))
check(e1.observations[4], e2.observations[4], false=True)
check(e1.observations[10], e2.observations[10], false=True)
# Split with lookback buffer (buffer=10, split=20/30).
len_lookback_buffer = 10
episode = self._create_episode(
num_data=60, t_started=15, len_lookback_buffer=len_lookback_buffer
)
self.assertTrue(episode.t == 65 and episode.t_started == 15)
# Convert to numpy before splitting.
episode.to_numpy()
# Create two 20/30 episode chunks.
e1 = episode.slice(slice(None, 20))
self.assertTrue(e1.is_numpy)
e2 = episode[20:]
self.assertTrue(e2.is_numpy)
# Make sure, `e1` and `e2` make sense.
self.assertTrue(len(e1) == 20)
self.assertTrue(len(e2) == 30)
self.assertTrue(e1.id_ == e2.id_)
self.assertTrue(e1.t_started == 15)
self.assertTrue(e1.t == 35)
self.assertTrue(e2.t_started == 35)
self.assertTrue(e2.t == 65)
# Make sure the chunks are not identical, but last obs of `e1` matches
# last obs of `e2`.
check(e1.get_observations(-1), e2.get_observations(0))
check(e1.observations[5], e2.observations[5], false=True)
check(e1.observations[11], e2.observations[11], false=True)
# Make sure the lookback buffers of both chunks are still working.
check(
e1.get_observations(-1, neg_index_as_lookback=True),
episode.observations.data[len_lookback_buffer - 1],
)
check(
e1.get_actions(-1, neg_index_as_lookback=True),
episode.actions.data[len_lookback_buffer - 1],
)
check(
e2.get_observations([-5, -2], neg_index_as_lookback=True),
[
episode.observations.data[20 + len_lookback_buffer - 5],
episode.observations.data[20 + len_lookback_buffer - 2],
],
)
check(
e2.get_rewards([-5, -2], neg_index_as_lookback=True),
[
episode.rewards.data[20 + len_lookback_buffer - 5],
episode.rewards.data[20 + len_lookback_buffer - 2],
],
)
def test_concat_episode(self):
"""Tests if concatenation of two `SingleAgentEpisode`s works.
This test ensures that concatenation of two episodes work. Note that
concatenation should only work for two chunks of the same episode, i.e.
they have the same `id_` and one should be the successor of the other.
It is also tested that concatenation fails, if timesteps do not match or
the episode to which we want to concatenate is already terminated.
"""
# Create two episodes and fill them with 100 timesteps each.
env = TestEnv()
init_obs, init_info = env.reset()
episode_1 = SingleAgentEpisode()
episode_1.add_env_reset(observation=init_obs, infos=init_info)
# Sample 100 timesteps.
for i in range(100):
action = i
obs, reward, terminated, truncated, info = env.step(action)
episode_1.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
extra_model_outputs={"extra": np.random.random(1)},
)
# Create a successor.
episode_2 = episode_1.cut()
# Now, sample 100 more timesteps.
for i in range(100, 200):
action = i
obs, reward, terminated, truncated, info = env.step(action)
episode_2.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
extra_model_outputs={"extra": np.random.random(1)},
)
# Assert that the second episode's `t_started` is at the first episode's
# `t`.
self.assertTrue(episode_1.t == episode_2.t_started)
# Assert that the second episode's `t` is at 200.
self.assertTrue(episode_2.t == 200)
# Manipulate the id of the second episode and make sure an error is
# thrown during concatenation.
episode_2.id_ = "wrong"
with self.assertRaises(AssertionError):
episode_1.concat_episode(episode_2)
# Reset the id.
episode_2.id_ = episode_1.id_
# Assert that when timesteps do not match an error is thrown.
episode_2.t_started += 1
with self.assertRaises(AssertionError):
episode_1.concat_episode(episode_2)
# Reset the timestep.
episode_2.t_started -= 1
# Assert that when the first episode is already done no concatenation can take
# place.
episode_1.is_terminated = True
with self.assertRaises(AssertionError):
episode_1.concat_episode(episode_2)
# Reset `is_terminated`.
episode_1.is_terminated = False
# Concatenate the episodes.
episode_1.concat_episode(episode_2)
# Assert that the concatenated episode start at `t_started=0`
# and has 200 sampled steps, i.e. `t=200`.
self.assertTrue(episode_1.t_started == 0)
self.assertTrue(episode_1.t == 200)
# Assert that all lists have appropriate length.
self.assertTrue(
len(episode_1.actions)
== len(episode_1.rewards)
== len(episode_1.observations) - 1
== len(episode_1.infos) - 1
== 200
)
# Assert that specific observations in the two episodes match.
self.assertEqual(episode_2.observations[5], episode_1.observations[105])
# Assert that they are not the same object.
# TODO (sven): Do we really need a deepcopy here?
# self.assertNotEqual(id(episode_2.observations[5]),
# id(episode_1.observations[105]))
def test_concat_episode_with_complex_obs(self):
"""Tests if concatenation of two `SingleAgentEpisode`s works with complex observations (e.g. dict)."""
# Create test environment that utilises dictionary based observations
env = DictTestEnv()
init_obs, init_info = env.reset()
episode_1 = SingleAgentEpisode()
episode_1.add_env_reset(observation=init_obs, infos=init_info)
for i in range(4):
action = i
obs, reward, terminated, truncated, info = env.step(action)
episode_1.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
)
assert len(episode_1) == 4
# cut episode 1 to create episode 2
episode_2 = episode_1.cut()
# fill with data
for i in range(6):
action = i
obs, reward, terminated, truncated, info = env.step(action)
episode_2.add_env_step(
observation=obs,
action=action,
reward=reward,
infos=info,
terminated=terminated,
truncated=truncated,
)
assert len(episode_2) == 6
# concat the episodes and check that episode 1 contains episode 2 content
episode_1.concat_episode(episode_2)
assert len(episode_1) == 10
def test_get_and_from_state(self):
"""Tests the `get_state` and `set_state` methods of `SingleAgentEpisode`.
This test ensures that the state of an episode can be stored and
restored correctly.
"""
# Create an episode and fill it with 100 timesteps.
episode = self._create_episode(100)
# Store the state.
state = episode.get_state()
episode_2 = SingleAgentEpisode.from_state(state)
# Assert that the episode is now at the same state as before.
self.assertEqual(episode_2.id_, episode.id_)
self.assertEqual(episode_2.agent_id, episode.agent_id)
self.assertEqual(
episode_2.multi_agent_episode_id, episode.multi_agent_episode_id
)
check(episode_2.t, episode.t)
check(episode_2.t_started, episode.t_started)
check(episode_2.observations[5], episode.observations[5])
check(episode_2.actions[5], episode.actions[5])
check(episode_2.rewards[5], episode.rewards[5])
check(episode_2.infos[5], episode.infos[5])
check(episode_2.is_terminated, episode.is_terminated)
check(episode_2.is_truncated, episode.is_truncated)
self.assertEqual(
type(episode_2._observation_space), type(episode._observation_space)
)
self.assertEqual(type(episode_2._action_space), type(episode._action_space))
check(episode_2._start_time, episode._start_time)
check(episode_2._last_step_time, episode._last_step_time)
check(episode_2.custom_data, episode.custom_data)
self.assertDictEqual(episode_2.extra_model_outputs, episode.extra_model_outputs)
def test_setters(self):
"""Tests whether the SingleAgentEpisode's setter methods work as expected.
Also tests numpy'ized episodes.
This test covers all setter methods:
- set_observations
- set_actions
- set_rewards
- set_extra_model_outputs
Each setter is tested with various indexing scenarios including:
- Single index
- List of indices
- Slice objects
- Negative indices (both regular and lookback buffer interpretation)
"""
SOME_KEY = "some_key"
# Create a simple episode without lookback buffer first for basic tests
episode = SingleAgentEpisode(
observations=[100, 101, 102, 103, 104, 105, 106],
actions=[1, 2, 3, 4, 5, 6],
rewards=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
extra_model_outputs={
SOME_KEY: [0.01, 0.02, 0.03, 0.04, 0.05, 0.06],
},
len_lookback_buffer=0,
)
test_patterns = [
# (description, new_data, indices)
("zero index", 7353.0, 0),
("single index", 7353.0, 2),
("negative index", 7353.0, -1),
("short list of indices", [7353.0], [1]),
("long list of indices", [73.0, 53.0, 35.0, 53.0], [1, 2, 3, 4]),
("short slice", [7353.0], slice(2, 3)),
("long slice", [7.0, 3.0, 5.0, 3.0], slice(2, 6)),
]
# Test set_rewards with all patterns
numpy_episode = copy.deepcopy(episode).to_numpy()
for e in [episode, numpy_episode]:
print(f"Testing numpy'ized={e.is_numpy}...")
for desc, new_data, indices in test_patterns:
print(f"Testing {desc}...")
expected_data = new_data
if e.is_numpy and isinstance(new_data, list):
new_data = np.array(new_data)
e.set_observations(new_data=new_data, at_indices=indices)
check(e.get_observations(indices), expected_data)
e.set_actions(new_data=new_data, at_indices=indices)
check(e.get_actions(indices), expected_data)
e.set_rewards(new_data=new_data, at_indices=indices)
check(e.get_rewards(indices), expected_data)
e.set_extra_model_outputs(
key=SOME_KEY, new_data=new_data, at_indices=indices
)
actual_data = e.get_extra_model_outputs(SOME_KEY)
if (
desc == "single index"
or desc == "zero index"
or desc == "negative index"
):
check(
actual_data[e.t_started + indices],
expected_data,
)
elif desc == "long list of indices" or desc == "short list of indices":
actual_values = actual_data[
slice(e.t_started + indices[0], e.t_started + indices[-1] + 1)
]
check(actual_values, expected_data)
elif desc == "long slice" or desc == "short slice":
actual_values = [
actual_data[e.t_started + i]
for i in range(indices.start, indices.stop)
]
check(actual_values, expected_data)
else:
raise ValueError(f"Invalid test pattern: {desc}")
def test_setters_error_cases(self):
"""Tests error cases for setter methods."""
episode = self._create_episode(100)
# Test IndexError when slice size doesn't match data size for observations
with self.assertRaises(IndexError):
episode.set_observations(
new_data=[7, 3, 5, 3], at_indices=slice(0, 2)
) # Slice of size 2, data of size 4
# Test AssertionError when key doesn't exist for extra_model_outputs
with self.assertRaises(AssertionError):
episode.set_extra_model_outputs(
key="nonexistent_key", new_data=999, at_indices=0
)
def _create_episode(self, num_data, t_started=None, len_lookback_buffer=0):
# Sample 100 values and initialize episode with observations and infos.
env = gym.make("CartPole-v1")
# Initialize containers.
observations = []
rewards = []
actions = []
infos = []
extra_model_outputs = defaultdict(list)
# Initialize observation and info.
init_obs, init_info = env.reset()
observations.append(init_obs)
infos.append(init_info)
# Run n samples.
for _ in range(num_data):
action = env.action_space.sample()
obs, reward, _, _, info = env.step(action)
observations.append(obs)
actions.append(action)
rewards.append(reward)
infos.append(info)
extra_model_outputs["extra_1"].append(np.random.random())
extra_model_outputs["state_out"].append(np.random.random())
return SingleAgentEpisode(
observations=observations,
infos=infos,
actions=actions,
rewards=rewards,
extra_model_outputs=extra_model_outputs,
t_started=t_started,
len_lookback_buffer=len_lookback_buffer,
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestSingleAgentEpisode |
python | gevent__gevent | src/gevent/tests/test__socket_errors.py | {
"start": 1307,
"end": 1869
} | class ____(greentest.TestCase):
__timeout__ = 5
def test_connection_refused(self):
port = support.find_unused_port()
with socket() as s:
try:
with self.assertRaises(error) as exc:
s.connect((greentest.DEFAULT_CONNECT_HOST, port))
except LoopExit:
return
ex = exc.exception
self.assertIn(ex.args[0], sysinfo.CONN_REFUSED_ERRORS, ex)
self.assertIn('refused', str(ex).lower())
if __name__ == '__main__':
greentest.main()
| TestSocketErrors |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 132047,
"end": 132304
} | class ____(str, Enum):
"""
Role of the peer in the consensus
"""
def __str__(self) -> str:
return str(self.value)
FOLLOWER = "Follower"
CANDIDATE = "Candidate"
LEADER = "Leader"
PRECANDIDATE = "PreCandidate"
| StateRole |
python | joblib__joblib | joblib/memory.py | {
"start": 8111,
"end": 9327
} | class ____(object):
"""Class representing an arbitrary value.
This class is a replacement for MemorizedResult when there is no cache.
"""
__slots__ = ("value", "valid")
def __init__(self, value):
self.value = value
self.valid = True
def get(self):
if self.valid:
return self.value
else:
raise KeyError("No value stored.")
def clear(self):
self.valid = False
self.value = None
def __repr__(self):
if self.valid:
return "{class_name}({value})".format(
class_name=self.__class__.__name__, value=pformat(self.value)
)
else:
return self.__class__.__name__ + " with no value"
# __getstate__ and __setstate__ are required because of __slots__
def __getstate__(self):
return {"valid": self.valid, "value": self.value}
def __setstate__(self, state):
self.valid = state["valid"]
self.value = state["value"]
###############################################################################
# class `NotMemorizedFunc`
###############################################################################
| NotMemorizedResult |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_claim_status.py | {
"start": 383,
"end": 7471
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allocation': 'V1AllocationResult',
'devices': 'list[V1AllocatedDeviceStatus]',
'reserved_for': 'list[V1ResourceClaimConsumerReference]'
}
attribute_map = {
'allocation': 'allocation',
'devices': 'devices',
'reserved_for': 'reservedFor'
}
def __init__(self, allocation=None, devices=None, reserved_for=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceClaimStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allocation = None
self._devices = None
self._reserved_for = None
self.discriminator = None
if allocation is not None:
self.allocation = allocation
if devices is not None:
self.devices = devices
if reserved_for is not None:
self.reserved_for = reserved_for
@property
def allocation(self):
"""Gets the allocation of this V1ResourceClaimStatus. # noqa: E501
:return: The allocation of this V1ResourceClaimStatus. # noqa: E501
:rtype: V1AllocationResult
"""
return self._allocation
@allocation.setter
def allocation(self, allocation):
"""Sets the allocation of this V1ResourceClaimStatus.
:param allocation: The allocation of this V1ResourceClaimStatus. # noqa: E501
:type: V1AllocationResult
"""
self._allocation = allocation
@property
def devices(self):
"""Gets the devices of this V1ResourceClaimStatus. # noqa: E501
Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. # noqa: E501
:return: The devices of this V1ResourceClaimStatus. # noqa: E501
:rtype: list[V1AllocatedDeviceStatus]
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1ResourceClaimStatus.
Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers. # noqa: E501
:param devices: The devices of this V1ResourceClaimStatus. # noqa: E501
:type: list[V1AllocatedDeviceStatus]
"""
self._devices = devices
@property
def reserved_for(self):
"""Gets the reserved_for of this V1ResourceClaimStatus. # noqa: E501
ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501
:return: The reserved_for of this V1ResourceClaimStatus. # noqa: E501
:rtype: list[V1ResourceClaimConsumerReference]
"""
return self._reserved_for
@reserved_for.setter
def reserved_for(self, reserved_for):
"""Sets the reserved_for of this V1ResourceClaimStatus.
ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated. In a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled. Both schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again. There can be at most 256 such reservations. This may get increased in the future, but not reduced. # noqa: E501
:param reserved_for: The reserved_for of this V1ResourceClaimStatus. # noqa: E501
:type: list[V1ResourceClaimConsumerReference]
"""
self._reserved_for = reserved_for
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceClaimStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceClaimStatus):
return True
return self.to_dict() != other.to_dict()
| V1ResourceClaimStatus |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_dataflow.py | {
"start": 69595,
"end": 73115
} | class ____:
@pytest.fixture
def hook(self):
return AsyncDataflowHook(
gcp_conn_id=TEST_PROJECT_ID,
)
@pytest.mark.asyncio
@mock.patch(DATAFLOW_STRING.format("AsyncDataflowHook.initialize_client"))
async def test_get_job(self, initialize_client_mock, hook, make_mock_awaitable):
client = initialize_client_mock.return_value
make_mock_awaitable(client.get_job, None)
await hook.get_job(
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
)
request = GetJobRequest(
dict(
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
view=JobView.JOB_VIEW_SUMMARY,
)
)
initialize_client_mock.assert_called_once()
client.get_job.assert_called_once_with(
request=request,
)
@pytest.mark.asyncio
@mock.patch(DATAFLOW_STRING.format("AsyncDataflowHook.initialize_client"))
async def test_list_jobs(self, initialize_client_mock, hook, make_mock_awaitable):
client = initialize_client_mock.return_value
make_mock_awaitable(client.get_job, None)
await hook.list_jobs(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
jobs_filter=TEST_JOBS_FILTER,
)
request = ListJobsRequest(
{
"project_id": TEST_PROJECT_ID,
"location": TEST_LOCATION,
"filter": TEST_JOBS_FILTER,
"page_size": None,
"page_token": None,
}
)
initialize_client_mock.assert_called_once()
client.list_jobs.assert_called_once_with(request=request)
@pytest.mark.asyncio
@mock.patch(DATAFLOW_STRING.format("AsyncDataflowHook.initialize_client"))
async def test_list_job_messages(self, initialize_client_mock, hook):
client = initialize_client_mock.return_value
await hook.list_job_messages(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
request = ListJobMessagesRequest(
{
"project_id": TEST_PROJECT_ID,
"job_id": TEST_JOB_ID,
"minimum_importance": JobMessageImportance.JOB_MESSAGE_BASIC,
"page_size": None,
"page_token": None,
"start_time": None,
"end_time": None,
"location": TEST_LOCATION,
}
)
initialize_client_mock.assert_called_once()
client.list_job_messages.assert_called_once_with(request=request)
@pytest.mark.asyncio
@mock.patch(DATAFLOW_STRING.format("AsyncDataflowHook.initialize_client"))
async def test_get_job_metrics(self, initialize_client_mock, hook):
client = initialize_client_mock.return_value
await hook.get_job_metrics(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
request = GetJobMetricsRequest(
{
"project_id": TEST_PROJECT_ID,
"job_id": TEST_JOB_ID,
"start_time": None,
"location": TEST_LOCATION,
}
)
initialize_client_mock.assert_called_once()
client.get_job_metrics.assert_called_once_with(request=request)
| TestAsyncDataflowHook |
python | dagster-io__dagster | examples/experimental/assets_yaml_dsl/assets_yaml_dsl/domain_specific_dsl/stocks_dsl.py | {
"start": 1161,
"end": 1210
} | class ____(NamedTuple):
type: str
| IndexStrategy |
python | facebook__pyre-check | tools/generate_taint_models/tests/inspect_parser_test.py | {
"start": 581,
"end": 653
} | class ____(TestClass):
pass
@final
@dataclass(frozen=True)
| TestDerived |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/pfor_test.py | {
"start": 1066,
"end": 2714
} | class ____(test.TestCase):
def test_rank_known(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, [None, None])
rank = pfor._rank(x)
self.assertIsInstance(rank, int)
self.assertEqual(rank, 2)
def test_rank_unknown(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
rank = pfor._rank(x)
self.assertIsInstance(rank, tensor.Tensor)
def test_size_known(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, [3, 5])
size = pfor._size(x)
self.assertIsInstance(size, int)
self.assertEqual(size, 3 * 5)
def test_size_unknown(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32, [3, None])
size = pfor._size(x, dtypes.int32)
self.assertIsInstance(size, tensor.Tensor)
self.assertEqual(size.dtype, dtypes.int32)
size = pfor._size(x, dtypes.int64)
self.assertIsInstance(size, tensor.Tensor)
self.assertEqual(size.dtype, dtypes.int64)
def test_expand_dims_static(self):
x = random_ops.random_uniform([3, 5])
axis = 1
num_axes = 2
expected = array_ops.reshape(x, [3, 1, 1, 5])
actual = pfor._expand_dims(x, axis, num_axes)
self.assertAllEqual(expected, actual)
def test_expand_dims_dynamic(self):
x = random_ops.random_uniform([3, 5])
axis = 1
num_axes = constant_op.constant([2])
expected = array_ops.reshape(x, [3, 1, 1, 5])
actual = pfor._expand_dims(x, axis, num_axes)
self.assertAllEqual(expected, actual)
if __name__ == '__main__':
test.main()
| PForTest |
python | huggingface__transformers | tests/models/grounding_dino/test_image_processing_grounding_dino.py | {
"start": 5981,
"end": 32544
} | class ____(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = GroundingDinoImageProcessor if is_vision_available() else None
fast_image_processing_class = GroundingDinoImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = GroundingDinoImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_image_processor_properties with DeformableDetr->GroundingDino
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "size"))
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_image_processor_from_dict_with_kwargs with DeformableDetr->GroundingDino
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad, True)
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 1333})
def test_post_process_object_detection(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
outputs = self.image_processor_tester.get_fake_grounding_dino_output()
results = image_processor.post_process_object_detection(outputs, threshold=0.0)
self.assertEqual(len(results), self.image_processor_tester.batch_size)
self.assertEqual(list(results[0].keys()), ["scores", "labels", "boxes"])
self.assertEqual(results[0]["boxes"].shape, (self.image_processor_tester.num_queries, 4))
self.assertEqual(results[0]["scores"].shape, (self.image_processor_tester.num_queries,))
expected_scores = torch.tensor([0.7050, 0.7222, 0.7222, 0.6829, 0.7220])
torch.testing.assert_close(results[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4)
expected_box_slice = torch.tensor([0.6908, 0.4354, 1.0737, 1.3947])
torch.testing.assert_close(results[0]["boxes"][0], expected_box_slice, rtol=1e-4, atol=1e-4)
@slow
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_call_pytorch_with_coco_detection_annotations with DeformableDetr->GroundingDino
def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
target = {"image_id": 39769, "annotations": target}
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class()
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->GroundingDino
def test_batched_coco_detection_annotations(self):
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
target = json.loads(f.read())
annotations_0 = {"image_id": 39769, "annotations": target}
annotations_1 = {"image_id": 39769, "annotations": target}
# Adjust the bounding boxes for the resized image
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotations_1["annotations"])):
coords = annotations_1["annotations"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotations_1["annotations"][i]["bbox"] = new_bbox
images = [image_0, image_1]
annotations = [annotations_0, annotations_1]
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class()
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
return_tensors="pt", # do_convert_annotations=True
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1066
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.6879, 0.4609, 0.0755, 0.3691],
[0.2118, 0.3359, 0.2601, 0.1566],
[0.5011, 0.5000, 0.9979, 1.0000],
[0.5010, 0.5020, 0.9979, 0.9959],
[0.3284, 0.5944, 0.5884, 0.8112],
[0.8394, 0.5445, 0.3213, 0.9110],
]
)
expected_boxes_1 = torch.tensor(
[
[0.4130, 0.2765, 0.0453, 0.2215],
[0.1272, 0.2016, 0.1561, 0.0940],
[0.3757, 0.4933, 0.7488, 0.9865],
[0.3759, 0.5002, 0.7492, 0.9955],
[0.1971, 0.5456, 0.3532, 0.8646],
[0.5790, 0.4115, 0.3430, 0.7161],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066]))
# Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height
# format and not in the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
@slow
# Copied from tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_call_pytorch_with_coco_panoptic_annotations with DeformableDetr->GroundingDino
def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
# verify pixel values
expected_shape = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4)
# verify area
expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
torch.testing.assert_close(encoding["labels"][0]["area"], expected_area)
# verify boxes
expected_boxes_shape = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3)
# verify image_id
expected_image_id = torch.tensor([39769])
torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id)
# verify is_crowd
expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd)
# verify class_labels
expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels)
# verify masks
expected_masks_sum = 822873
relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum
self.assertTrue(relative_error < 1e-3)
# verify orig_size
expected_orig_size = torch.tensor([480, 640])
torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size)
# verify size
expected_size = torch.tensor([800, 1066])
torch.testing.assert_close(encoding["labels"][0]["size"], expected_size)
@slow
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->GroundingDino
def test_batched_coco_panoptic_annotations(self):
# prepare image, target and masks_path
image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800))
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f:
target = json.loads(f.read())
annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
w_0, h_0 = image_0.size
w_1, h_1 = image_1.size
for i in range(len(annotation_1["segments_info"])):
coords = annotation_1["segments_info"][i]["bbox"]
new_bbox = [
coords[0] * w_1 / w_0,
coords[1] * h_1 / h_0,
coords[2] * w_1 / w_0,
coords[3] * h_1 / h_0,
]
annotation_1["segments_info"][i]["bbox"] = new_bbox
masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
images = [image_0, image_1]
annotations = [annotation_0, annotation_1]
for image_processing_class in self.image_processor_list:
# encode them
image_processing = image_processing_class(format="coco_panoptic")
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_tensors="pt",
return_segmentation_masks=True,
)
# Check the pixel values have been padded
postprocessed_height, postprocessed_width = 800, 1066
expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width])
self.assertEqual(encoding["pixel_values"].shape, expected_shape)
# Check the bounding boxes have been adjusted for padded images
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
expected_boxes_0 = torch.tensor(
[
[0.2625, 0.5437, 0.4688, 0.8625],
[0.7719, 0.4104, 0.4531, 0.7125],
[0.5000, 0.4927, 0.9969, 0.9854],
[0.1688, 0.2000, 0.2063, 0.0917],
[0.5492, 0.2760, 0.0578, 0.2187],
[0.4992, 0.4990, 0.9984, 0.9979],
]
)
expected_boxes_1 = torch.tensor(
[
[0.1576, 0.3262, 0.2814, 0.5175],
[0.4634, 0.2463, 0.2720, 0.4275],
[0.3002, 0.2956, 0.5985, 0.5913],
[0.1013, 0.1200, 0.1238, 0.0550],
[0.3297, 0.1656, 0.0347, 0.1312],
[0.2997, 0.2994, 0.5994, 0.5987],
]
)
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3)
# Check the masks have also been padded
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066]))
self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066]))
# Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height
# format and not in the range [0, 1]
encoding = image_processing(
images=images,
annotations=annotations,
masks_path=masks_path,
return_segmentation_masks=True,
do_convert_annotations=False,
return_tensors="pt",
)
self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4]))
self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4]))
# Convert to absolute coordinates
unnormalized_boxes_0 = torch.vstack(
[
expected_boxes_0[:, 0] * postprocessed_width,
expected_boxes_0[:, 1] * postprocessed_height,
expected_boxes_0[:, 2] * postprocessed_width,
expected_boxes_0[:, 3] * postprocessed_height,
]
).T
unnormalized_boxes_1 = torch.vstack(
[
expected_boxes_1[:, 0] * postprocessed_width,
expected_boxes_1[:, 1] * postprocessed_height,
expected_boxes_1[:, 2] * postprocessed_width,
expected_boxes_1[:, 3] * postprocessed_height,
]
).T
# Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max
expected_boxes_0 = torch.vstack(
[
unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2,
unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2,
unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2,
]
).T
expected_boxes_1 = torch.vstack(
[
unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2,
unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2,
unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2,
]
).T
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1)
torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1)
# Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->GroundingDino
def test_max_width_max_height_resizing_and_pad_strategy(self):
for image_processing_class in self.image_processor_list:
image_1 = torch.ones([200, 100, 3], dtype=torch.uint8)
# do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50]))
# do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
# do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100
image_processor = image_processing_class(
size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100}
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100]))
# do_pad=True, max_height=300, max_width=100, image=200x100 -> 300x100
image_processor = image_processing_class(
size={"max_height": 300, "max_width": 100},
do_pad=True,
pad_size={"height": 301, "width": 101},
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101]))
### Check for batch
image_2 = torch.ones([100, 150, 3], dtype=torch.uint8)
# do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100
image_processor = image_processing_class(
size={"max_height": 150, "max_width": 100},
do_pad=True,
pad_size={"height": 150, "width": 100},
)
inputs = image_processor(images=[image_1, image_2], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100]))
def test_longest_edge_shortest_edge_resizing_strategy(self):
image_1 = torch.ones([958, 653, 3], dtype=torch.uint8)
# max size is set; width < height;
# do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436
image_processor = GroundingDinoImageProcessor(
size={"longest_edge": 640, "shortest_edge": 640},
do_pad=False,
)
inputs = image_processor(images=[image_1], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436]))
image_2 = torch.ones([653, 958, 3], dtype=torch.uint8)
# max size is set; height < width;
# do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640
image_processor = GroundingDinoImageProcessor(
size={"longest_edge": 640, "shortest_edge": 640},
do_pad=False,
)
inputs = image_processor(images=[image_2], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640]))
image_3 = torch.ones([100, 120, 3], dtype=torch.uint8)
# max size is set; width == size; height > max_size;
# do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98
image_processor = GroundingDinoImageProcessor(
size={"longest_edge": 118, "shortest_edge": 100},
do_pad=False,
)
inputs = image_processor(images=[image_3], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118]))
image_4 = torch.ones([128, 50, 3], dtype=torch.uint8)
# max size is set; height == size; width < max_size;
# do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128
image_processor = GroundingDinoImageProcessor(
size={"longest_edge": 256, "shortest_edge": 50},
do_pad=False,
)
inputs = image_processor(images=[image_4], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50]))
image_5 = torch.ones([50, 50, 3], dtype=torch.uint8)
# max size is set; height == width; width < max_size;
# do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50
image_processor = GroundingDinoImageProcessor(
size={"longest_edge": 117, "shortest_edge": 50},
do_pad=False,
)
inputs = image_processor(images=[image_5], return_tensors="pt")
self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50]))
| GroundingDinoImageProcessingTest |
python | getsentry__sentry | tests/sentry/api/helpers/test_deprecation.py | {
"start": 1508,
"end": 10857
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
def assert_deprecation_metadata(self, request: HttpRequest, response: HttpResponse) -> None:
assert "X-Sentry-Deprecation-Date" in response
assert "X-Sentry-Replacement-Endpoint" in response
assert response["X-Sentry-Deprecation-Date"] == test_date.isoformat()
assert response["X-Sentry-Replacement-Endpoint"] == replacement_api
def assert_not_deprecated(self, method):
request = self.make_request(method=method)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" not in resp
assert "X-Sentry-Replacement-Endpoint" not in resp
def assert_allowed_request(self, method):
request = self.make_request(method=method)
request.META["HTTP_ORIGIN"] = "http://example.com"
resp = dummy_endpoint(request)
resp.render()
assert resp.status_code == HTTP_200_OK
self.assert_deprecation_metadata(request, resp)
def assert_denied_request(self, method):
request = self.make_request(method=method)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_410_GONE
assert resp.data == {"message": "This API no longer exists."}
self.assert_deprecation_metadata(request, resp)
def test_before_deprecation_date(self) -> None:
with self.settings(SENTRY_SELF_HOSTED=False):
with freeze_time(test_date - timedelta(seconds=1)):
self.assert_allowed_request("GET")
def test_after_deprecation_date(self) -> None:
with self.settings(SENTRY_SELF_HOSTED=False):
with freeze_time(test_date):
self.assert_allowed_request("GET")
brownout_start = next(timeiter)
with freeze_time(brownout_start):
self.assert_denied_request("GET")
mid_brownout = brownout_start + timedelta(seconds=1)
with freeze_time(mid_brownout):
self.assert_denied_request("GET")
brownout_end = brownout_start + timedelta(minutes=1)
with freeze_time(brownout_end):
self.assert_allowed_request("GET")
def test_self_hosted(self) -> None:
with self.settings(SENTRY_SELF_HOSTED=True, ENVIRONMENT="production"):
self.assert_not_deprecated("GET")
def test_no_decorator(self) -> None:
with self.settings(SENTRY_SELF_HOSTED=False):
self.assert_not_deprecated("HEAD")
def test_default_key(self) -> None:
with (
self.settings(SENTRY_SELF_HOSTED=False),
override_options(
{
"api.deprecation.brownout-duration": custom_duration,
"api.deprecation.brownout-cron": custom_cron,
}
),
):
custom_time_iter = CronSim(custom_cron, test_date)
custom_duration_timedelta = timedelta(seconds=custom_duration)
old_brownout_start = next(timeiter)
with freeze_time(old_brownout_start):
self.assert_allowed_request("GET")
new_brownout_start = next(custom_time_iter)
with freeze_time(new_brownout_start):
self.assert_denied_request("GET")
old_brownout_end = new_brownout_start + default_duration
with freeze_time(old_brownout_end):
self.assert_denied_request("GET")
new_brownout_end = new_brownout_start + custom_duration_timedelta
with freeze_time(new_brownout_end):
self.assert_allowed_request("GET")
def test_custom_key(self) -> None:
with self.settings(
SENTRY_SELF_HOSTED=False,
):
old_brownout_start = next(timeiter)
with freeze_time(old_brownout_start):
self.assert_denied_request("POST")
register("override-cron", default=custom_cron)
register("override-duration", default=custom_duration)
custom_time_iter = CronSim(custom_cron, test_date)
custom_duration_timedelta = timedelta(seconds=custom_duration)
with freeze_time(old_brownout_start):
self.assert_allowed_request("POST")
new_brownout_start = next(custom_time_iter)
with freeze_time(new_brownout_start):
self.assert_denied_request("POST")
new_brownout_end = new_brownout_start + custom_duration_timedelta
with freeze_time(new_brownout_end):
self.assert_allowed_request("POST")
def test_bad_schedule_format(self) -> None:
brownout_start = next(timeiter)
with freeze_time(brownout_start):
with (
self.settings(SENTRY_SELF_HOSTED=False),
override_options(
{
"api.deprecation.brownout-duration": "bad duration",
},
),
):
self.assert_allowed_request("GET")
with (
self.settings(SENTRY_SELF_HOSTED=False),
override_options(
{
"api.deprecation.brownout-duration": 60,
},
),
):
options.delete("api.deprecation.brownout-duration")
self.assert_denied_request("GET")
with (
self.settings(SENTRY_SELF_HOSTED=False),
override_options(
{
"api.deprecation.brownout-cron": "bad schedule",
},
),
):
options.delete("api.deprecation.brownout-cron")
self.assert_allowed_request("GET")
with (
self.settings(SENTRY_SELF_HOSTED=False),
override_options(
{
"api.deprecation.brownout-cron": "0 12 * * *",
},
),
):
options.delete("api.deprecation.brownout-cron")
self.assert_denied_request("GET")
def test_with_url_names(self) -> None:
with self.settings(SENTRY_SELF_HOSTED=False):
# Resolver url_name doesn't match
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint, args=tuple(), kwargs={}, url_name="sentry-dummy-delete"
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" not in resp
assert "X-Sentry-Replacement-Endpoint" not in resp
# Resolver url_name does match
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint,
args=tuple(),
kwargs={},
url_name="sentry-dummy-delete-deprecated",
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" in resp
def test_with_url_names_brownout(self) -> None:
# Before the brownout
with self.settings(SENTRY_SELF_HOSTED=False), freeze_time(test_date - timedelta(minutes=1)):
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint,
args=tuple(),
kwargs={},
url_name="sentry-dummy-delete-deprecated",
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" in resp
# url name that doesn't match is not deprecated
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint, args=tuple(), kwargs={}, url_name="sentry-dummy-delete"
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" not in resp
# After the brownout
brownout_start = next(timeiter)
with self.settings(SENTRY_SELF_HOSTED=False), freeze_time(brownout_start):
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint,
args=tuple(),
kwargs={},
url_name="sentry-dummy-delete-deprecated",
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_410_GONE
assert "X-Sentry-Deprecation-Date" in resp
# Other url names are not deprecated
request = self.make_request(method="DELETE")
request.resolver_match = ResolverMatch(
func=dummy_endpoint, args=tuple(), kwargs={}, url_name="sentry-dummy-delete"
)
resp = dummy_endpoint(request)
assert resp.status_code == HTTP_200_OK
assert "X-Sentry-Deprecation-Date" not in resp
| TestDeprecationDecorator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/experiment_service.py | {
"start": 1037,
"end": 3870
} | class ____(GoogleBaseHook):
"""Use the Vertex AI SDK for Python to manage your experiments."""
@GoogleBaseHook.fallback_to_default_project_id
def create_experiment(
self,
experiment_name: str,
location: str,
experiment_description: str = "",
project_id: str = PROVIDE_PROJECT_ID,
experiment_tensorboard: str | None = None,
):
"""
Create an experiment and, optionally, associate a Vertex AI TensorBoard instance using the Vertex AI SDK for Python.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param experiment_description: Optional. Description of the evaluation experiment.
:param experiment_tensorboard: Optional. The Vertex TensorBoard instance to use as a backing
TensorBoard for the provided experiment. If no TensorBoard is provided, a default Tensorboard
instance is created and used by this experiment.
"""
aiplatform.init(
experiment=experiment_name,
experiment_description=experiment_description,
experiment_tensorboard=experiment_tensorboard if experiment_tensorboard else False,
project=project_id,
location=location,
)
self.log.info("Created experiment with name: %s", experiment_name)
@GoogleBaseHook.fallback_to_default_project_id
def delete_experiment(
self,
experiment_name: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
delete_backing_tensorboard_runs: bool = False,
) -> None:
"""
Delete an experiment.
Deleting an experiment deletes that experiment and all experiment runs associated with the experiment.
The Vertex AI TensorBoard experiment associated with the experiment is not deleted.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud location that the service belongs to.
:param experiment_name: Required. The name of the evaluation experiment.
:param delete_backing_tensorboard_runs: Optional. If True will also delete the Vertex AI TensorBoard
runs associated with the experiment runs under this experiment that we used to store time series
metrics.
"""
experiment = aiplatform.Experiment(
experiment_name=experiment_name, project=project_id, location=location
)
experiment.delete(delete_backing_tensorboard_runs=delete_backing_tensorboard_runs)
| ExperimentHook |
python | Pylons__pyramid | src/pyramid/config/i18n.py | {
"start": 214,
"end": 4989
} | class ____:
@action_method
def set_locale_negotiator(self, negotiator):
"""
Set the :term:`locale negotiator` for this application. The
:term:`locale negotiator` is a callable which accepts a
:term:`request` object and which returns a :term:`locale
name`. The ``negotiator`` argument should be the locale
negotiator implementation or a :term:`dotted Python name`
which refers to such an implementation.
Later calls to this method override earlier calls; there can
be only one locale negotiator active at a time within an
application. See :ref:`activating_translation` for more
information.
.. note::
Using the ``locale_negotiator`` argument to the
:class:`pyramid.config.Configurator` constructor can be used to
achieve the same purpose.
"""
def register():
self._set_locale_negotiator(negotiator)
intr = self.introspectable(
'locale negotiator',
None,
self.object_description(negotiator),
'locale negotiator',
)
intr['negotiator'] = negotiator
self.action(ILocaleNegotiator, register, introspectables=(intr,))
def _set_locale_negotiator(self, negotiator):
locale_negotiator = self.maybe_dotted(negotiator)
self.registry.registerUtility(locale_negotiator, ILocaleNegotiator)
@action_method
def add_translation_dirs(self, *specs, **kw):
"""Add one or more :term:`translation directory` paths to the
current configuration state. The ``specs`` argument is a
sequence that may contain absolute directory paths
(e.g. ``/usr/share/locale``) or :term:`asset specification`
names naming a directory path (e.g. ``some.package:locale``)
or a combination of the two.
Example:
.. code-block:: python
config.add_translation_dirs('/usr/share/locale',
'some.package:locale')
The translation directories are defined as a list in which
translations defined later have precedence over translations defined
earlier.
By default, consecutive calls to ``add_translation_dirs`` will add
directories to the start of the list. This means later calls to
``add_translation_dirs`` will have their translations trumped by
earlier calls. If you explicitly need this call to trump an earlier
call then you may set ``override`` to ``True``.
If multiple specs are provided in a single call to
``add_translation_dirs``, the directories will be inserted in the
order they're provided (earlier items are trumped by later items).
.. versionchanged:: 1.8
The ``override`` parameter was added to allow a later call
to ``add_translation_dirs`` to override an earlier call, inserting
folders at the beginning of the translation directory list.
"""
introspectables = []
override = kw.pop('override', False)
if kw:
raise TypeError('invalid keyword arguments: %s', sorted(kw.keys()))
def register():
directories = []
resolver = AssetResolver(self.package_name)
# defer spec resolution until register to allow for asset
# overrides to take place in an earlier config phase
for spec in specs:
# the trailing slash helps match asset overrides for folders
if not spec.endswith('/'):
spec += '/'
asset = resolver.resolve(spec)
directory = asset.abspath()
if not asset.isdir():
raise ConfigurationError(
'"%s" is not a directory' % directory
)
intr = self.introspectable(
'translation directories',
directory,
spec,
'translation directory',
)
intr['directory'] = directory
intr['spec'] = spec
introspectables.append(intr)
directories.append(directory)
tdirs = self.registry.queryUtility(ITranslationDirectories)
if tdirs is None:
tdirs = []
self.registry.registerUtility(tdirs, ITranslationDirectories)
if override:
tdirs.extend(directories)
else:
for directory in reversed(directories):
tdirs.insert(0, directory)
self.action(None, register, introspectables=introspectables)
| I18NConfiguratorMixin |
python | pola-rs__polars | py-polars/src/polars/datatype_expr/array.py | {
"start": 68,
"end": 1401
} | class ____:
"""Namespace for arr datatype expressions."""
_accessor = "arr"
def __init__(self, expr: pl.DataTypeExpr) -> None:
self._pydatatype_expr = expr._pydatatype_expr
def inner_dtype(self) -> pl.DataTypeExpr:
"""Get the inner DataType of array."""
return pl.DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.arr_inner_dtype()
)
def width(self) -> pl.Expr:
"""
Get the array width.
Examples
--------
>>> pl.select(pl.Array(pl.Int8, (1, 2, 3)).to_dtype_expr().arr.width())
shape: (1, 1)
βββββββββββ
β literal β
β --- β
β u32 β
βββββββββββ‘
β 1 β
βββββββββββ
"""
return pl.Expr._from_pyexpr(self._pydatatype_expr.arr_width())
def shape(self) -> pl.Expr:
"""
Get the array shape.
Examples
--------
>>> pl.select(pl.Array(pl.Int8, (1, 2, 3)).to_dtype_expr().arr.shape())
shape: (3, 1)
βββββββββββ
β literal β
β --- β
β u32 β
βββββββββββ‘
β 1 β
β 2 β
β 3 β
βββββββββββ
"""
return pl.Expr._from_pyexpr(self._pydatatype_expr.arr_shape())
| DataTypeExprArrNameSpace |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 8403,
"end": 8831
} | class ____(TrainingArguments):
a: float = 0.0
b: float = 0.0
keep_report_to: bool = False
def __post_init__(self):
super().__post_init__()
# save resources not dealing with reporting unless specified (also avoids the warning when it's not set)
# can be explicitly disabled via `keep_report_to`
if not self.keep_report_to:
self.report_to = []
| RegressionTrainingArguments |
python | openai__openai-python | src/openai/resources/conversations/conversations.py | {
"start": 1189,
"end": 8666
} | class ____(SyncAPIResource):
@cached_property
def items(self) -> Items:
return Items(self._client)
@cached_property
def with_raw_response(self) -> ConversationsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ConversationsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ConversationsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ConversationsWithStreamingResponse(self)
def create(
self,
*,
items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Create a conversation.
Args:
items: Initial items to include in the conversation context. You may add up to 20 items
at a time.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/conversations",
body=maybe_transform(
{
"items": items,
"metadata": metadata,
},
conversation_create_params.ConversationCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
def retrieve(
self,
conversation_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Get a conversation
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._get(
f"/conversations/{conversation_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
def update(
self,
conversation_id: str,
*,
metadata: Optional[Metadata],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Conversation:
"""
Update a conversation
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._post(
f"/conversations/{conversation_id}",
body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Conversation,
)
def delete(
self,
conversation_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ConversationDeletedResource:
"""Delete a conversation.
Items in the conversation will not be deleted.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not conversation_id:
raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
return self._delete(
f"/conversations/{conversation_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ConversationDeletedResource,
)
| Conversations |
python | Textualize__textual | src/textual/reactive.py | {
"start": 794,
"end": 937
} | class ____:
"""A wrapper to indicate a value was mutated."""
def __init__(self, value: Any) -> None:
self.value = value
| _Mutated |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 21716,
"end": 24141
} | class ____(CythonCommand):
"""
Invoke a Cython command. Available commands are:
cy import
cy break
cy step
cy next
cy run
cy cont
cy finish
cy up
cy down
cy select
cy bt / cy backtrace
cy list
cy print
cy set
cy locals
cy globals
cy exec
"""
name = 'cy'
command_class = gdb.COMMAND_NONE
completer_class = gdb.COMPLETE_COMMAND
def __init__(self, name, command_class, completer_class):
# keep the signature 2.5 compatible (i.e. do not use f(*a, k=v)
super(CythonCommand, self).__init__(name, command_class,
completer_class, prefix=True)
commands = dict(
# GDB commands
import_ = CyImport.register(),
break_ = CyBreak.register(),
step = CyStep.register(),
next = CyNext.register(),
run = CyRun.register(),
cont = CyCont.register(),
finish = CyFinish.register(),
up = CyUp.register(),
down = CyDown.register(),
select = CySelect.register(),
bt = CyBacktrace.register(),
list = CyList.register(),
print_ = CyPrint.register(),
locals = CyLocals.register(),
globals = CyGlobals.register(),
exec_ = libpython.FixGdbCommand('cy exec', '-cy-exec'),
_exec = CyExec.register(),
set = CySet.register(),
# GDB functions
cy_cname = CyCName('cy_cname'),
cy_cvalue = CyCValue('cy_cvalue'),
cy_lineno = CyLine('cy_lineno'),
cy_eval = CyEval('cy_eval'),
)
for command_name, command in commands.items():
command.cy = self
setattr(self, command_name, command)
self.cy = self
# Cython module namespace
self.cython_namespace = {}
# maps (unique) qualified function names (e.g.
# cythonmodule.ClassName.method_name) to the CythonFunction object
self.functions_by_qualified_name = {}
# unique cnames of Cython functions
self.functions_by_cname = {}
# map function names like method_name to a list of all such
# CythonFunction objects
self.functions_by_name = collections.defaultdict(list)
| CyCy |
python | ray-project__ray | doc/source/ray-overview/examples/mcp-ray-serve/mcp-gateway-with-existing-ray-apps/mcp_gateway.py | {
"start": 2129,
"end": 2405
} | class ____:
def __init__(self):
pass
# --------------------------------------------------------------------------
# 5. Expose the Serve application graph
# --------------------------------------------------------------------------
app = MCPGateway.bind()
| MCPGateway |
python | facebookresearch__faiss | benchs/bench_fw/index.py | {
"start": 31719,
"end": 32231
} | class ____(IndexBase):
pre_transform_index: Index
def __init__(self, pre_transform_index: Index):
self.pre_transform_index = pre_transform_index
super().__init__()
def get_codec_name(self):
pre_transform_codec_name = self.pre_transform_index.get_codec_name()
return f"{pre_transform_codec_name}pretransform."
def get_codec(self):
return self.get_codec()
# IndexFromFactory is for creating and training indices from scratch
@dataclass
| IndexFromPreTransform |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.