language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/components.py | {
"start": 12112,
"end": 13195
} | class ____(RecordTransformation):
"""
Custom transformation that takes in a record that represents a map of all dynamic properties retrieved
from the Hubspot properties endpoint. This mapping nests all of these fields under a sub-object called
`properties` and updates all the property field names at the top level to be prefixed with
`properties_<property_name>`.
"""
def transform(
self,
record: Dict[str, Any],
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
transformed_record = {
"properties": {
"type": "object",
"properties": {},
}
}
for key, value in record.items():
transformed_record["properties"]["properties"][key] = value
updated_key = f"properties_{key}"
transformed_record[updated_key] = value
record.clear()
record.update(transformed_record)
| HubspotRenamePropertiesTransformation |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/log.py | {
"start": 1570,
"end": 1679
} | class ____(BaseModel):
"""Response for the external log URL endpoint."""
url: str
| ExternalLogUrlResponse |
python | tiangolo__fastapi | docs_src/response_model/tutorial001_01_py310.py | {
"start": 78,
"end": 469
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: list[str] = []
@app.post("/items/")
async def create_item(item: Item) -> Item:
return item
@app.get("/items/")
async def read_items() -> list[Item]:
return [
Item(name="Portal Gun", price=42.0),
Item(name="Plumbus", price=32.0),
]
| Item |
python | getsentry__sentry | src/sentry/deletions/defaults/organization.py | {
"start": 436,
"end": 3860
} | class ____(ModelDeletionTask[Organization]):
def should_proceed(self, instance: Organization) -> bool:
"""
Only delete organizations that haven't been undeleted.
"""
return instance.status in {
OrganizationStatus.PENDING_DELETION,
OrganizationStatus.DELETION_IN_PROGRESS,
}
def get_child_relations(self, instance: Organization) -> list[BaseRelation]:
from sentry.deletions.defaults.discoversavedquery import DiscoverSavedQueryDeletionTask
from sentry.discover.models import DiscoverSavedQuery, TeamKeyTransaction
from sentry.incidents.models.alert_rule import AlertRule
from sentry.incidents.models.incident import Incident
from sentry.integrations.models.external_issue import ExternalIssue
from sentry.models.artifactbundle import ArtifactBundle
from sentry.models.commitauthor import CommitAuthor
from sentry.models.dashboard import Dashboard
from sentry.models.environment import Environment
from sentry.models.organizationmember import OrganizationMember
from sentry.models.project import Project
from sentry.models.promptsactivity import PromptsActivity
from sentry.models.release import Release
from sentry.models.repository import Repository
from sentry.models.team import Team
from sentry.models.transaction_threshold import ProjectTransactionThreshold
from sentry.workflow_engine.models import Workflow
# Team must come first
relations: list[BaseRelation] = [ModelRelation(Team, {"organization_id": instance.id})]
model_list = (
OrganizationMember,
Repository,
CommitAuthor,
Incident,
AlertRule,
Release,
Project,
Environment,
Dashboard,
TeamKeyTransaction,
ExternalIssue,
PromptsActivity,
ProjectTransactionThreshold,
ArtifactBundle,
)
relations.extend([ModelRelation(m, {"organization_id": instance.id}) for m in model_list])
# Explicitly assign the task here as it was getting replaced with BulkModelDeletionTask in CI.
relations.append(
ModelRelation(
DiscoverSavedQuery,
{"organization_id": instance.id},
task=DiscoverSavedQueryDeletionTask,
)
)
relations.append(ModelRelation(Workflow, {"organization_id": instance.id}))
return relations
def delete_instance(self, instance: Organization) -> None:
org_id = instance.id
org_slug = instance.slug
transaction.on_commit(
lambda: notify_overwatch_organization_deleted.delay(org_id, org_slug),
using=router.db_for_write(Organization),
)
super().delete_instance(instance)
def mark_deletion_in_progress(self, instance_list: Sequence[Organization]) -> None:
from sentry.models.organization import OrganizationStatus
for instance in instance_list:
if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:
update_organization_with_outbox_message(
org_id=instance.id,
update_data={"status": OrganizationStatus.DELETION_IN_PROGRESS},
)
| OrganizationDeletionTask |
python | openai__gym | gym/envs/box2d/bipedal_walker.py | {
"start": 27451,
"end": 31174
} | class ____:
def __init__(self):
raise error.Error(
"Error initializing BipedalWalkerHardcore Environment.\n"
"Currently, we do not support initializing this mode of environment by calling the class directly.\n"
"To use this environment, instead create it by specifying the hardcore keyword in gym.make, i.e.\n"
'gym.make("BipedalWalker-v3", hardcore=True)'
)
if __name__ == "__main__":
# Heurisic: suboptimal, have no notion of balance.
env = BipedalWalker()
env.reset()
steps = 0
total_reward = 0
a = np.array([0.0, 0.0, 0.0, 0.0])
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1, 2, 3
SPEED = 0.29 # Will fall forward on higher speed
state = STAY_ON_ONE_LEG
moving_leg = 0
supporting_leg = 1 - moving_leg
SUPPORT_KNEE_ANGLE = +0.1
supporting_knee_angle = SUPPORT_KNEE_ANGLE
while True:
s, r, terminated, truncated, info = env.step(a)
total_reward += r
if steps % 20 == 0 or terminated or truncated:
print("\naction " + str([f"{x:+0.2f}" for x in a]))
print(f"step {steps} total_reward {total_reward:+0.2f}")
print("hull " + str([f"{x:+0.2f}" for x in s[0:4]]))
print("leg0 " + str([f"{x:+0.2f}" for x in s[4:9]]))
print("leg1 " + str([f"{x:+0.2f}" for x in s[9:14]]))
steps += 1
contact0 = s[8]
contact1 = s[13]
moving_s_base = 4 + 5 * moving_leg
supporting_s_base = 4 + 5 * supporting_leg
hip_targ = [None, None] # -0.8 .. +1.1
knee_targ = [None, None] # -0.6 .. +0.9
hip_todo = [0.0, 0.0]
knee_todo = [0.0, 0.0]
if state == STAY_ON_ONE_LEG:
hip_targ[moving_leg] = 1.1
knee_targ[moving_leg] = -0.6
supporting_knee_angle += 0.03
if s[2] > SPEED:
supporting_knee_angle += 0.03
supporting_knee_angle = min(supporting_knee_angle, SUPPORT_KNEE_ANGLE)
knee_targ[supporting_leg] = supporting_knee_angle
if s[supporting_s_base + 0] < 0.10: # supporting leg is behind
state = PUT_OTHER_DOWN
if state == PUT_OTHER_DOWN:
hip_targ[moving_leg] = +0.1
knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE
knee_targ[supporting_leg] = supporting_knee_angle
if s[moving_s_base + 4]:
state = PUSH_OFF
supporting_knee_angle = min(s[moving_s_base + 2], SUPPORT_KNEE_ANGLE)
if state == PUSH_OFF:
knee_targ[moving_leg] = supporting_knee_angle
knee_targ[supporting_leg] = +1.0
if s[supporting_s_base + 2] > 0.88 or s[2] > 1.2 * SPEED:
state = STAY_ON_ONE_LEG
moving_leg = 1 - moving_leg
supporting_leg = 1 - moving_leg
if hip_targ[0]:
hip_todo[0] = 0.9 * (hip_targ[0] - s[4]) - 0.25 * s[5]
if hip_targ[1]:
hip_todo[1] = 0.9 * (hip_targ[1] - s[9]) - 0.25 * s[10]
if knee_targ[0]:
knee_todo[0] = 4.0 * (knee_targ[0] - s[6]) - 0.25 * s[7]
if knee_targ[1]:
knee_todo[1] = 4.0 * (knee_targ[1] - s[11]) - 0.25 * s[12]
hip_todo[0] -= 0.9 * (0 - s[0]) - 1.5 * s[1] # PID to keep head strait
hip_todo[1] -= 0.9 * (0 - s[0]) - 1.5 * s[1]
knee_todo[0] -= 15.0 * s[3] # vertical speed, to damp oscillations
knee_todo[1] -= 15.0 * s[3]
a[0] = hip_todo[0]
a[1] = knee_todo[0]
a[2] = hip_todo[1]
a[3] = knee_todo[1]
a = np.clip(0.5 * a, -1.0, 1.0)
if terminated or truncated:
break
| BipedalWalkerHardcore |
python | facebook__pyre-check | client/commands/incremental.py | {
"start": 1418,
"end": 7490
} | class ____:
exit_code: commands.ExitCode
connected_to: ServerStatus
def parse_type_error_response_json(response_json: object) -> TypeErrors:
try:
# The response JSON is expected to have one of the following form:
# `["TypeErrors", [error_json0, error_json1, ...]]` (legacy form)
# `["TypeErrors", {"errors": [error_json0, ...], "build_failure": "..."}]`
if (
isinstance(response_json, list)
and len(response_json) > 1
and response_json[0] == "TypeErrors"
):
errors_json = response_json[1]
if isinstance(errors_json, list):
return TypeErrors(
errors=[
error.Error.from_json(error_json) for error_json in errors_json
],
build_failure=None,
)
elif isinstance(errors_json, dict):
error_list = errors_json.get("errors", [])
build_failure = errors_json.get("build_failure", None)
if isinstance(error_list, list) and (
build_failure is None or isinstance(build_failure, str)
):
return TypeErrors(
errors=[
error.Error.from_json(error_json)
for error_json in error_list
],
build_failure=build_failure,
)
raise InvalidServerResponse(
f"Unexpected JSON response from server: {response_json}"
)
except error.ErrorParsingFailure as parsing_error:
message = f"Unexpected error JSON from server: {parsing_error}"
raise InvalidServerResponse(message) from parsing_error
def parse_type_error_response(response: str) -> TypeErrors:
try:
response_json = json.loads(response)
return parse_type_error_response_json(response_json)
except json.JSONDecodeError as decode_error:
message = f"Cannot parse response as JSON: {decode_error}"
raise InvalidServerResponse(message) from decode_error
def _read_type_errors(socket_path: Path) -> TypeErrors:
with connections.connect(socket_path) as (
input_channel,
output_channel,
):
# The empty list argument means we want all type errors from the server.
output_channel.write('["DisplayTypeError", []]\n')
return parse_type_error_response(input_channel.readline())
def privacy_error_filter(error: error.Error) -> bool:
return error.code >= 3000
def display_type_errors(
errors: List[error.Error],
output: str,
filter: Optional[Callable[[error.Error], bool]],
) -> None:
error.print_errors(
[
error.relativize_path(against=Path.cwd())
for error in errors
if filter is None or filter(error)
],
output=output,
)
def _show_progress_log_and_display_type_errors(
log_path: Path,
socket_path: Path,
output: str,
error_filter: Optional[Callable[[error.Error], bool]],
remote_logging: Optional[backend_arguments.RemoteLogging],
) -> commands.ExitCode:
LOG.info("Waiting for server...")
with start.background_logging(log_path):
type_errors = _read_type_errors(socket_path)
display_type_errors(type_errors.errors, output=output, filter=error_filter)
if type_errors.build_failure is not None:
LOG.warning("You may be seeing stale type checking results. Reason:")
LOG.warning(type_errors.build_failure)
return (
commands.ExitCode.SUCCESS
if len(type_errors.errors) == 0
else commands.ExitCode.FOUND_ERRORS
)
def run_incremental(
configuration: frontend_configuration.Base,
incremental_arguments: command_arguments.IncrementalArguments,
) -> ExitStatus:
flavor = identifiers.PyreFlavor.CLASSIC
socket_path = daemon_socket.get_socket_path(
configuration.get_project_identifier(),
flavor=flavor,
)
# Need to be consistent with the log symlink location in start command
log_path = (
configuration.get_log_directory()
/ flavor.server_log_subdirectory()
/ "server.stderr"
)
output = incremental_arguments.output
remote_logging = backend_arguments.RemoteLogging.create(
configuration.get_remote_logger(),
incremental_arguments.start_arguments.get_log_identifier(),
)
error_filter = (
privacy_error_filter if configuration.get_only_privacy_errors() else None
)
try:
exit_code = _show_progress_log_and_display_type_errors(
log_path,
socket_path,
output,
error_filter,
remote_logging,
)
return ExitStatus(
exit_code=exit_code, connected_to=ServerStatus.ALREADY_RUNNING
)
except connections.ConnectionFailure:
pass
if incremental_arguments.no_start:
raise commands.ClientException("Cannot find a running Pyre server.")
LOG.info("Cannot find a running Pyre server. Starting a new one...")
start_status = start.run(configuration, incremental_arguments.start_arguments)
if start_status != commands.ExitCode.SUCCESS:
raise commands.ClientException(
f"`pyre start` failed with non-zero exit code: {start_status}"
)
exit_code = _show_progress_log_and_display_type_errors(
log_path, socket_path, output, error_filter, remote_logging
)
return ExitStatus(exit_code=exit_code, connected_to=ServerStatus.NEWLY_STARTED)
def run(
configuration: frontend_configuration.Base,
incremental_arguments: command_arguments.IncrementalArguments,
) -> ExitStatus:
try:
return run_incremental(configuration, incremental_arguments)
except server_event.ServerStartException as error:
raise commands.ClientException(
f"{error}", exit_code=error.kind.to_exit_code()
) from error
| ExitStatus |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 22129,
"end": 23619
} | class ____(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, paddings):
block_shape = np.array(block_shape)
paddings = np.array(paddings).reshape((len(block_shape), 2))
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, paddings):
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, paddings)
# Don't use very large numbers as dimensions here as the result is tensor
# with cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
self._compare([1, 4, 6, 5], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmall2(self):
self._compare([2, 8, 6, 2], [2, 2], [[0, 0], [0, 0]])
@test_util.run_deprecated_v1
def testSmallPad1(self):
self._compare([2, 4, 6, 2], [2, 2], [[1, 1], [1, 1]])
@test_util.run_deprecated_v1
def testSmallPadThreeBlockDims(self):
self._compare([2, 2, 4, 3, 2], [2, 2, 2], [[1, 1], [1, 1], [1, 0]])
| SpaceToBatchNDGradientTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/win32_types.py | {
"start": 3905,
"end": 4197
} | class ____(Structure):
"""struct in wincon.h."""
if TYPE_CHECKING:
Left: int
Top: int
Right: int
Bottom: int
_fields_ = [
("Left", c_short),
("Top", c_short),
("Right", c_short),
("Bottom", c_short),
]
| SMALL_RECT |
python | jazzband__django-waffle | test_app/views.py | {
"start": 3358,
"end": 3443
} | class ____(WaffleSampleMixin, BaseWaffleView):
waffle_sample = '!foo'
| SampleOffView |
python | huggingface__transformers | src/transformers/models/oneformer/convert_to_hf_oneformer.py | {
"start": 9145,
"end": 50477
} | class ____:
def __init__(self, original_model: nn.Module, config: OneFormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
# Swin Backbone
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = "pixel_level_module.encoder"
src_prefix: str = "backbone"
renamed_keys = [
(
f"{src_prefix}.patch_embed.proj.weight",
f"{dst_prefix}.embeddings.patch_embeddings.projection.weight",
),
(f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"),
(f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"),
(f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"),
]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(
[ # src, dst
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table",
),
]
)
# now we need to handle the attentions
# read in weights + bias of input projection layer of cross-attention
src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight"
] = src_att_weight[:offset, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias"
] = src_att_bias[:offset]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight"
] = src_att_weight[offset : offset * 2, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias"
] = src_att_bias[offset : offset * 2]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight"
] = src_att_weight[-offset:, :]
dst_state_dict[
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias"
] = src_att_bias[-offset:]
# let's pop them
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
# proj
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias",
),
]
)
# second norm
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias",
),
]
)
# mlp
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias",
),
]
)
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index",
f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index",
)
]
)
if layer_idx < num_layers - 1:
# patch merging
renamed_keys.extend(
[
(
f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight",
),
(
f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias",
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias",
),
]
)
# hidden states norms
renamed_keys.extend(
[
(
f"{src_prefix}.norm{layer_idx}.weight",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight",
),
(
f"{src_prefix}.norm{layer_idx}.bias",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias",
),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Dinat Backbone
def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = "pixel_level_module.encoder"
src_prefix: str = "backbone"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm")
for i in range(2):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.patch_embed.proj.{i}",
f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}",
)
)
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before",
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after",
)
)
renamed_keys.extend(
[ # src, dst
(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb",
),
]
)
# now we need to handle the attentions
# read in weights + bias of input projection layer of cross-attention
src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"]
src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"]
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight"
] = src_att_weight[:offset, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias"
] = src_att_bias[:offset]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight"
] = src_att_weight[offset : offset * 2, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias"
] = src_att_bias[offset : offset * 2]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight"
] = src_att_weight[-offset:, :]
dst_state_dict[
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias"
] = src_att_bias[-offset:]
# let's pop them
src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight")
src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias")
# proj
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense",
)
)
# mlp
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense",
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2",
f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense",
)
)
if layer_idx < num_layers - 1:
# patch merging
renamed_keys.extend(
[
(
f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight",
),
(
f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight",
),
(
f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias",
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias",
),
]
)
# hidden states norms
renamed_keys.extend(
[
(
f"{src_prefix}.norm{layer_idx}.weight",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight",
),
(
f"{src_prefix}.norm{layer_idx}.bias",
f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias",
),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Backbone + Pixel Decoder
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool):
dst_prefix: str = "pixel_level_module.decoder"
src_prefix: str = "sem_seg_head.pixel_decoder"
if is_swin:
self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
else:
self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
self_attn_keys = []
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights")
)
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj")
)
self_attn_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets")
)
self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj"))
return self_attn_keys
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
encoder_keys = []
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1"))
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2"))
encoder_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm")
)
encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm"))
encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn"))
return encoder_keys
# convolution layer for final features
renamed_keys = [
(f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"),
(f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"),
(f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"),
]
renamed_keys.extend(
[
(f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"),
(f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"),
(f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"),
]
)
# proj layers
for i in range(3):
for j in range(2):
renamed_keys.extend(
[
(f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"),
(f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"),
]
)
renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")])
# layers
for layer_idx in range(self.config.encoder_layers):
renamed_keys.extend(
rename_keys_for_encoder_layer(
f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}"
)
)
# proj
renamed_keys.extend(
[
(f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"),
(f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"),
]
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
# Transformer Decoder
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module.decoder.layers"
src_prefix: str = "sem_seg_head.predictor"
for i in range(self.config.decoder_layers - 1):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = src_state_dict.pop(
f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight"
)
in_proj_bias = src_state_dict.pop(
f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias"
)
# next, add query, keys and values (in that order) to the state dict
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = "transformer_module"
src_prefix: str = "sem_seg_head.predictor"
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [
(f"{src_prefix}.weight", f"{dst_prefix}.weight"),
(f"{src_prefix}.bias", f"{dst_prefix}.bias"),
]
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
attn_keys = [
(f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
(f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
]
attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
return attn_keys
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
attn_keys = []
attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))
return attn_keys
def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str):
query_transformer_layer_keys = []
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2")
)
query_transformer_layer_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3")
)
query_transformer_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
)
query_transformer_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
)
return query_transformer_layer_keys
def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str):
cross_attn_layer_keys = []
cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
cross_attn_layer_keys.extend(
rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn")
)
return cross_attn_layer_keys
def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str):
self_attn_layer_keys = []
self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
self_attn_layer_keys.extend(
rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")
)
return self_attn_layer_keys
def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str):
ffn_layer_keys = []
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1"))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2"))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm"))
return ffn_layer_keys
def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int):
transformer_decoder_layer_keys = []
transformer_decoder_layer_keys.extend(
rename_keys_for_cross_attn_layer(
f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn"
)
)
transformer_decoder_layer_keys.extend(
rename_keys_for_self_attn_layer(
f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn"
)
)
transformer_decoder_layer_keys.extend(
rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn")
)
return transformer_decoder_layer_keys
# positional embedding for object queries
renamed_keys = [
(f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"),
(f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"),
]
# norm
renamed_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm")
)
# proj
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection"
)
)
renamed_keys.extend(
rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed")
)
for i in range(3):
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.mask_embed.layers.{i}", f"{dst_prefix}.decoder.mask_embed.layers.{i}.0"
)
)
# norm
renamed_keys.extend(
rename_keys_for_weight_bias(
f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm"
)
)
# transformer to update queries with task tokens
for i in range(self.config.query_dec_layers):
renamed_keys.extend(
rename_keys_for_query_transformer_layer(
f"{src_prefix}.class_transformer.decoder.layers.{i}",
f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}",
)
)
# decoder layers
for i in range(self.config.decoder_layers - 1):
renamed_keys.extend(
rename_keys_for_transformer_decoder_layer(
f"{src_prefix}",
f"{dst_prefix}.decoder.layers",
i,
)
)
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict):
    """Move the task-MLP weights from the original checkpoint into the HF task encoder.

    Maps ``task_mlp.layers.{i}.{weight,bias}`` in the source state dict onto
    ``task_encoder.task_mlp.layers.{i}.0.{weight,bias}`` in the destination,
    then pops the matched keys from both sides.
    """
    dst_root = "task_encoder"
    src_root = "task_mlp"

    # Two MLP layers, each contributing a (weight, bias) pair, in that order.
    renamed_keys = [
        (f"{src_root}.layers.{layer_idx}.{param}", f"{dst_root}.task_mlp.layers.{layer_idx}.0.{param}")
        for layer_idx in range(2)
        for param in ("weight", "bias")
    ]

    self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict):
    """Move the text-projector weights into the HF text mapper.

    Maps ``text_projector.layers.{i}.{weight,bias}`` onto
    ``text_mapper.text_projector.{i}.0.{weight,bias}`` for every projection
    layer declared in the text-encoder config, then pops the matched keys.
    """
    dst_root = "text_mapper.text_projector"
    src_root = "text_projector"

    num_proj_layers = self.config.text_encoder_config["text_encoder_proj_layers"]
    # (weight, bias) pairs per layer, preserving weight-before-bias order.
    renamed_keys = [
        (f"{src_root}.layers.{layer_idx}.{param}", f"{dst_root}.{layer_idx}.0.{param}")
        for layer_idx in range(num_proj_layers)
        for param in ("weight", "bias")
    ]

    self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict):
    """Move the CLIP-style text encoder weights into the HF ``text_mapper``.

    First converts the text projector (separate helper), then renames the
    prompt context, token/positional embeddings, final layer norm, and every
    transformer residual block from the original ``text_encoder.*`` layout to
    the HF ``text_mapper.text_encoder.*`` layout. Matched keys are popped from
    both state dicts.
    """
    dst_prefix: str = "text_mapper.text_encoder"
    src_prefix: str = "text_encoder"

    self.replace_text_projector(dst_state_dict, src_state_dict)

    # NOTE: the nested helpers deliberately shadow the outer
    # ``src_prefix``/``dst_prefix`` names; they operate on whatever prefixes
    # the caller passes in.
    def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
        # A (weight, bias) rename pair for one module.
        return [
            (f"{src_prefix}.weight", f"{dst_prefix}.weight"),
            (f"{src_prefix}.bias", f"{dst_prefix}.bias"),
        ]

    def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
        # Fused in-projection (qkv) params plus the output projection.
        attn_keys = [
            (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"),
            (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"),
        ]
        attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj"))

        return attn_keys

    def rename_keys_for_layer(src_prefix: str, dst_prefix: str):
        # One residual block: MLP (c_fc/c_proj -> fc1/fc2), both layer norms
        # (ln_1/ln_2 -> layer_norm1/layer_norm2), and self-attention.
        resblock_keys = []

        resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1"))
        resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2"))
        resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1"))
        resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2"))
        resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn"))

        return resblock_keys

    # Learned prompt context lives outside the ``text_encoder.`` namespace.
    renamed_keys = [
        ("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"),
    ]

    # Token and positional embeddings.
    renamed_keys.extend(
        [
            (f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"),
            (f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"),
        ]
    )

    # Final layer norm.
    renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final"))

    # One rename group per transformer residual block.
    for i in range(self.config.text_encoder_config["text_encoder_num_layers"]):
        renamed_keys.extend(
            rename_keys_for_layer(
                f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}"
            )
        )

    self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel:
    """Copy all weights from the original OneFormer checkpoint into ``oneformer``.

    Runs each module-specific replacement in turn, logs any keys left
    unmatched on either side, then loads the assembled state dict into the
    HF model and returns it.
    """
    tracked_dst = TrackedStateDict(oneformer.state_dict())
    original_src = self.original_model.state_dict()

    # Backbone/pixel decoder, transformer decoder, and task MLP are always present.
    self.replace_pixel_module(tracked_dst, original_src, is_swin)
    self.replace_transformer_module(tracked_dst, original_src)
    self.replace_task_mlp(tracked_dst, original_src)
    # The text mapper is only part of training checkpoints.
    if self.config.is_training:
        self.replace_text_mapper(tracked_dst, original_src)

    # Whatever remains was never copied — surface it for debugging.
    logger.info(f"Missed keys are {pformat(tracked_dst.diff())}")
    logger.info(f"Not copied keys are {pformat(original_src.keys())}")
    logger.info("🙌 Done")

    oneformer.load_state_dict(tracked_dst)

    return oneformer
@staticmethod
def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
    """Yield ``(config_file, checkpoint_file)`` pairs for each checkpoint under ``checkpoints_dir``.

    Every ``*.pth`` file (searched recursively) is paired with the
    ``<stem>.yaml`` config of the same name in ``config_dir``. Pairs are
    yielded in sorted path order so conversion runs are deterministic.

    Note: the config path is not checked for existence here; a missing config
    will only fail when the caller opens it.
    """
    # Path.glob returns a lazily-evaluated generator in unspecified order;
    # sort it so repeated runs process checkpoints deterministically.
    # (The previous annotation claimed list[Path] and a 3-tuple yield type;
    # both were wrong — the function yields 2-tuples.)
    checkpoints = sorted(checkpoints_dir.glob("**/*.pth"))

    for checkpoint in checkpoints:
        logger.info(f"Converting {checkpoint.stem}")
        # find associated config file
        config = config_dir / f"{checkpoint.stem}.yaml"

        yield config, checkpoint
def post_process_sem_seg_output(outputs: OneFormerForUniversalSegmentationOutput, target_size: tuple[int, int]):
    """Turn raw OneFormer outputs into a per-class semantic segmentation map.

    Combines per-query class probabilities (with the trailing null class
    dropped) and per-query mask probabilities into a tensor of shape
    ``[batch, classes, height, width]``.
    """
    class_logits = outputs.class_queries_logits  # [BATCH, QUERIES, CLASSES + 1]
    mask_logits = outputs.masks_queries_logits  # [BATCH, QUERIES, HEIGHT, WIDTH]

    # Upsample the mask logits to the requested output resolution first.
    if target_size is not None:
        mask_logits = torch.nn.functional.interpolate(
            mask_logits,
            size=target_size,
            mode="bilinear",
            align_corners=False,
        )

    # Class probabilities without the null class (`[..., :-1]`).
    class_probs = class_logits.softmax(dim=-1)[..., :-1]
    # Mask probabilities, shape [BATCH, QUERIES, HEIGHT, WIDTH].
    mask_probs = mask_logits.sigmoid()

    # Sum over queries, weighting each query's mask by its class probability:
    # out[b, c, h, w] = sum_q class_probs[b, q, c] * mask_probs[b, q, h, w]
    return torch.einsum("bqc, bqhw -> bchw", class_probs, mask_probs)
def test(
    original_model,
    our_model: OneFormerForUniversalSegmentation,
    processor: OneFormerProcessor,
    model_repo: str,
):
    """Check numerical parity between the original OneFormer and the converted HF model.

    Runs both models on the same image/task prompt and asserts (via ``assert``,
    so this must not run under ``python -O``) that backbone features, pixel
    decoder features, and the final semantic segmentation agree within fixed
    tolerances. Raises ``AssertionError`` on any mismatch.
    """

    def _preprocess_text(text_list=None, max_length=77):
        # Tokenize the task prompt(s) and mask out padding by multiplying
        # input ids with the attention mask, mirroring the original repo's
        # text preprocessing. Returns a [num_texts, max_length] tensor.
        if text_list is None:
            raise ValueError("tokens cannot be None.")

        tokens = tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True)

        attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"]

        token_inputs = []
        for attn_mask, input_id in zip(attention_masks, input_ids):
            token = torch.tensor(attn_mask) * torch.tensor(input_id)
            token_inputs.append(token.unsqueeze(0))

        token_inputs = torch.cat(token_inputs, dim=0)
        return token_inputs

    with torch.no_grad():
        tokenizer = CLIPTokenizer.from_pretrained(model_repo)
        original_model = original_model.eval()
        our_model = our_model.eval()

        im = prepare_img()

        # Normalized input for the HF model (ImageNet-style mean/std scaled to [0, 1]).
        tr = T.Compose(
            [
                T.Resize((640, 640)),
                T.ToTensor(),
                T.Normalize(
                    mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0,
                    std=torch.tensor([58.395, 57.120, 57.375]) / 255.0,
                ),
            ],
        )

        x = tr(im).unsqueeze(0)

        task_input = ["the task is semantic"]
        task_token = _preprocess_text(task_input, max_length=processor.task_seq_length)

        # 1) Compare backbone features stage by stage.
        original_model_backbone_features = original_model.backbone(x.clone())

        our_model_output: OneFormerModelOutput = our_model.model(x.clone(), task_token, output_hidden_states=True)

        for original_model_feature, our_model_feature in zip(
            original_model_backbone_features.values(), our_model_output.encoder_hidden_states
        ):
            assert torch.allclose(original_model_feature, our_model_feature, atol=3e-3), (
                "The backbone features are not the same."
            )
        # 2) Compare pixel decoder features: mask features first, then each
        #    multi-scale feature, matching the HF hidden-states ordering.
        mask_features, _, multi_scale_features, _, _ = original_model.sem_seg_head.pixel_decoder.forward_features(
            original_model_backbone_features
        )

        original_pixel_decoder_features = []
        original_pixel_decoder_features.append(mask_features)
        for i in range(len(multi_scale_features)):
            original_pixel_decoder_features.append(multi_scale_features[i])

        for original_model_feature, our_model_feature in zip(
            original_pixel_decoder_features, our_model_output.pixel_decoder_hidden_states
        ):
            assert torch.allclose(original_model_feature, our_model_feature, atol=3e-4), (
                "The pixel decoder feature are not the same"
            )

        # 3) Compare end-to-end segmentation. The original model expects raw
        #    0-255 pixel values (it normalizes internally), hence the separate
        #    un-normalized transform here.
        tr_complete = T.Compose(
            [
                T.Resize((640, 640)),
                T.ToTensor(),
            ],
        )

        y = (tr_complete(im) * 255.0).to(torch.int).float()

        # let's test the full model
        original_model_out = original_model([{"image": y.clone(), "task": "The task is semantic"}])

        original_segmentation = original_model_out[0]["sem_seg"]

        our_model_out: OneFormerForUniversalSegmentationOutput = our_model(
            x.clone(), task_token, output_hidden_states=True
        )

        our_segmentation = post_process_sem_seg_output(our_model_out, target_size=(640, 640))[0]

        assert torch.allclose(original_segmentation, our_segmentation, atol=1e-3), (
            "The segmentation image is not the same."
        )

        logger.info("Test passed!")
def get_name(checkpoint_file: Path) -> str:
    """Derive the canonical model name ``oneformer_<dataset>_<backbone>_<type>`` from a checkpoint path.

    The dataset (coco/ade20k/cityscapes), backbone (swin/dinat), and backbone
    size (tiny/large) are inferred from substrings of the checkpoint filename
    stem.

    Raises:
        ValueError: if the stem names no known dataset or no known backbone size.
    """
    model_name_raw: str = checkpoint_file.stem

    # NOTE: any stem without "swin" is assumed to be a DiNAT checkpoint.
    backbone = "swin" if "swin" in model_name_raw else "dinat"

    dataset = ""
    if "coco" in model_name_raw:
        dataset = "coco"
    elif "ade20k" in model_name_raw:
        dataset = "ade20k"
    elif "cityscapes" in model_name_raw:
        dataset = "cityscapes"
    else:
        raise ValueError(
            f"{model_name_raw} must be wrong since we didn't find 'coco' or 'ade20k' or 'cityscapes' in it "
        )

    # Previously `list(filter(...))[0]`, which raised an opaque IndexError when
    # neither size was present; fail with an explicit message instead.
    backbone_types = ["tiny", "large"]
    backbone_type = next((t for t in backbone_types if t in model_name_raw), None)
    if backbone_type is None:
        raise ValueError(
            f"{model_name_raw} must be wrong since we didn't find 'tiny' or 'large' in it "
        )

    model_name = f"oneformer_{dataset}_{backbone}_{backbone_type}"

    return model_name
if __name__ == "__main__":
    # CLI entry point: convert every original OneFormer checkpoint found under
    # --checkpoints_dir into the HF transformers format, verify numerical
    # parity, save locally, and push to the Hub.
    parser = ArgumentParser(
        description=(
            "Command line to convert the original oneformer models (with swin backbone) to transformers"
            " implementation."
        )
    )

    parser.add_argument(
        "--checkpoints_dir",
        type=Path,
        help=(
            "A directory containing the model's checkpoints. The directory has to have the following structure:"
            " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pth; where <CONFIG_NAME> name must follow the"
            " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>"
        ),
    )
    parser.add_argument(
        "--configs_dir",
        type=Path,
        help=(
            "A directory containing the model's configs, see detectron2 doc. The directory has to have the following"
            " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml; where <CONFIG_NAME> name must follow the"
            " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>"
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        required=True,
        type=Path,
        help="Path to the folder to output PyTorch models.",
    )
    parser.add_argument(
        "--oneformer_dir",
        required=True,
        type=Path,
        help=(
            "A path to OneFormer's original implementation directory. You can download from here: "
            "https://github.com/SHI-Labs/OneFormer"
        ),
    )

    args = parser.parse_args()

    checkpoints_dir: Path = args.checkpoints_dir
    config_dir: Path = args.configs_dir
    save_directory: Path = args.pytorch_dump_folder_path
    oneformer_dir: Path = args.oneformer_dir
    # append the path to the parents to oneformer dir
    sys.path.append(str(oneformer_dir.parent))
    # and import what's needed
    # NOTE(review): the add_*_config imports look unused here but presumably
    # register config keys as an import side effect — confirm before removing.
    from OneFormer.oneformer import add_common_config, add_dinat_config, add_oneformer_config, add_swin_config
    from OneFormer.oneformer.oneformer_model import OneFormer as OriginalOneFormer

    if not save_directory.exists():
        save_directory.mkdir(parents=True)

    # One full conversion pass per (config, checkpoint) pair.
    for config_file, checkpoint_file in OriginalOneFormerCheckpointToOursConverter.using_dirs(
        checkpoints_dir, config_dir
    ):
        # Build the HF processor from the original detectron2-style config.
        processor = OriginalOneFormerConfigToProcessorConverter()(
            setup_cfg(Args(config_file=config_file)), os.path.join("shi-labs", config_file.stem)
        )

        original_config = setup_cfg(Args(config_file=config_file))
        oneformer_kwargs = OriginalOneFormer.from_config(original_config)

        # Instantiate the original model and load its checkpoint weights.
        original_model = OriginalOneFormer(**oneformer_kwargs).eval()

        DetectionCheckpointer(original_model).load(str(checkpoint_file))

        is_swin = "swin" in config_file.stem

        # Translate the original config into an HF OneFormerConfig.
        config: OneFormerConfig = OriginalOneFormerConfigToOursConverter()(original_config, is_swin)

        oneformer = OneFormerModel(config=config).eval()

        # Copy all weights from the original checkpoint into the HF model.
        converter = OriginalOneFormerCheckpointToOursConverter(original_model, config)

        oneformer = converter.convert(oneformer, is_swin)

        # Wrap the base model in the segmentation head wrapper used downstream.
        oneformer_for_universal_segmentation = OneFormerForUniversalSegmentation(config=config).eval()

        oneformer_for_universal_segmentation.model = oneformer

        # Numerical-parity check against the original model (raises on mismatch).
        test(
            original_model,
            oneformer_for_universal_segmentation,
            processor,
            os.path.join("shi-labs", config_file.stem),
        )

        model_name = get_name(checkpoint_file)
        logger.info(f"Saving {model_name}")

        # Save locally, then publish processor and model to the Hub.
        processor.save_pretrained(save_directory / model_name)
        oneformer_for_universal_segmentation.save_pretrained(save_directory / model_name)

        model_id = f"shi-labs/{model_name}"
        processor.push_to_hub(repo_id=model_id)
        oneformer_for_universal_segmentation.push_to_hub(repo_id=model_id)
| OriginalOneFormerCheckpointToOursConverter |
python | tensorflow__tensorflow | tensorflow/lite/python/metrics/metrics_nonportable.py | {
"start": 2545,
"end": 4398
} | class ____(metrics_interface.TFLiteMetricsInterface):
"""TFLite metrics helper for prod (borg) environment.
Attributes:
model_hash: A string containing the hash of the model binary.
model_path: A string containing the path of the model for debugging
purposes.
"""
def __init__(self,
model_hash: Optional[Text] = None,
model_path: Optional[Text] = None) -> None:
del self # Temporarily removing self until parameter logic is implemented.
if model_hash and not model_path or not model_hash and model_path:
raise ValueError('Both model metadata(model_hash, model_path) should be '
'given at the same time.')
if model_hash:
# TODO(b/180400857): Create stub once the service is implemented.
pass
def increase_counter_debugger_creation(self):
_counter_debugger_creation.get_cell().increase_by(1)
def increase_counter_interpreter_creation(self):
_counter_interpreter_creation.get_cell('python').increase_by(1)
def increase_counter_converter_attempt(self):
_counter_conversion_attempt.get_cell().increase_by(1)
def increase_counter_converter_success(self):
_counter_conversion_success.get_cell().increase_by(1)
def set_converter_param(self, name, value):
_gauge_conversion_params.get_cell(name).set(value)
def set_converter_error(
self, error_data: converter_error_data_pb2.ConverterErrorData):
error_code_str = converter_error_data_pb2.ConverterErrorData.ErrorCode.Name(
error_data.error_code)
_gauge_conversion_errors.get_cell(
error_data.component,
error_data.subcomponent,
error_data.operator.name,
error_code_str,
).set(error_data.error_message)
def set_converter_latency(self, value):
_gauge_conversion_latency.get_cell().set(value)
| TFLiteMetrics |
python | getsentry__sentry | src/sentry/replays/lib/storage.py | {
"start": 2171,
"end": 2712
} | class ____(ABC):
@abstractmethod
def delete(self, segment: RecordingSegmentStorageMeta) -> None:
"""Remove a blob from remote storage."""
raise NotImplementedError
@abstractmethod
def get(self, segment: RecordingSegmentStorageMeta) -> bytes | None:
"""Return blob from remote storage."""
raise NotImplementedError
@abstractmethod
def set(self, segment: RecordingSegmentStorageMeta, value: bytes) -> None:
"""Set blob in remote storage."""
raise NotImplementedError
| Blob |
python | getsentry__sentry | src/sentry/explore/models.py | {
"start": 1606,
"end": 2386
} | class ____(DefaultFieldsModel):
__relocation_scope__ = RelocationScope.Organization
user_id = HybridCloudForeignKey("sentry.User", on_delete="CASCADE")
organization = FlexibleForeignKey("sentry.Organization")
explore_saved_query = FlexibleForeignKey("explore.ExploreSavedQuery")
last_visited = models.DateTimeField(null=False, default=timezone.now)
class Meta:
app_label = "explore"
db_table = "explore_exploresavedquerylastvisited"
constraints = [
UniqueConstraint(
fields=["user_id", "organization_id", "explore_saved_query_id"],
name="explore_exploresavedquerylastvisited_unique_last_visited_per_org_user_query",
)
]
@region_silo_model
| ExploreSavedQueryLastVisited |
python | tornadoweb__tornado | tornado/httpclient.py | {
"start": 30195,
"end": 31897
} | class ____:
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(
self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]
) -> None:
self.request = request
self.defaults = defaults
def __getattr__(self, name: str) -> Any:
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main() -> None:
from tornado.options import define, options, parse_command_line
define("print_headers", type=bool, default=False)
define("print_body", type=bool, default=True)
define("follow_redirects", type=bool, default=True)
define("validate_cert", type=bool, default=True)
define("proxy_host", type=str)
define("proxy_port", type=int)
args = parse_command_line()
client = HTTPClient()
for arg in args:
try:
response = client.fetch(
arg,
follow_redirects=options.follow_redirects,
validate_cert=options.validate_cert,
proxy_host=options.proxy_host,
proxy_port=options.proxy_port,
)
except HTTPError as e:
if e.response is not None:
response = e.response
else:
raise
if options.print_headers:
print(response.headers)
if options.print_body:
print(native_str(response.body))
client.close()
if __name__ == "__main__":
main()
| _RequestProxy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/workspace.py | {
"start": 727,
"end": 830
} | class ____(Enum):
CODE_SERVER = "CODE_SERVER"
CONNECTION = "CONNECTION"
@record
| DefinitionsSource |
python | tensorflow__tensorflow | tensorflow/python/distribute/central_storage_strategy.py | {
"start": 1060,
"end": 8789
} | class ____(distribute_lib.Strategy):
"""A one-machine strategy that puts all variables on a single device.
Variables are assigned to local CPU or the only GPU. If there is more
than one GPU, compute operations (other than variable update operations)
will be replicated across all GPUs.
For Example:
```
strategy = tf.distribute.experimental.CentralStorageStrategy()
# Create a dataset
ds = tf.data.Dataset.range(5).batch(2)
# Distribute that dataset
dist_dataset = strategy.experimental_distribute_dataset(ds)
with strategy.scope():
@tf.function
def train_step(val):
return val + 1
# Iterate over the distributed dataset
for x in dist_dataset:
# process dataset elements
strategy.run(train_step, args=(x,))
```
"""
def __init__(self, compute_devices=None, parameter_device=None):
extended = parameter_server_strategy.ParameterServerStrategyExtended(
self,
compute_devices=compute_devices,
parameter_device=parameter_device)
"""Initializes the strategy with optional device strings.
Args:
compute_devices: an optional list of strings for device to replicate models
on. If this is not provided, all local GPUs will be used; if there is no
GPU, local CPU will be used.
parameter_device: an optional device string for which device to put
variables on. The default one is CPU or GPU if there is only one.
"""
super(CentralStorageStrategy, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell('V2').set(
'CentralStorageStrategy')
@classmethod
def _from_num_gpus(cls, num_gpus):
return cls(device_util.local_devices_from_num_gpus(num_gpus))
def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
The returned dataset is a wrapped strategy dataset which creates a
multidevice iterator under the hood. It prefetches the input data to the
specified devices on the worker. The returned distributed dataset can be
iterated over similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
For Example:
```
strategy = tf.distribute.CentralStorageStrategy() # with 1 CPU and 1 GPU
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # Prints PerReplica values [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
if (options and options.experimental_replication_moden ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
'InputReplicationMode.PER_REPLICA '
'is only supported in '
'`experimental_distribute_datasets_from_function`.'
)
return super(CentralStorageStrategy, self).experimental_distribute_dataset(
dataset, options)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `CentralStorageStrategy` there is a single worker so the value returned
will be all the values on that worker.
Args:
value: A value returned by `run()`, `extended.call_for_each_replica()`,
or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
"""
return super(CentralStorageStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `CentralStorageStrategy`, `fn` is called on each of the compute
replicas, with the provided "per replica" arguments specific to that device.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(CentralStorageStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
Given a per-replica value returned by `run`, say a
per-example loss, the batch will be divided across all the replicas. This
function allows you to aggregate across replicas and optionally also across
batch elements. For example, if you have a global batch size of 8 and 2
replicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and
`[4, 5, 6, 7]` will be on replica 1. By default, `reduce` will just
aggregate across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful
when each replica is computing a scalar or some other value that doesn't
have a "batch" dimension (like a gradient). More often you will want to
aggregate across the global batch, which you can get by specifying the batch
dimension as the `axis`, typically `axis=0`. In this case it would return a
scalar `0+1+2+3+4+5+6+7`.
If there is a last partial batch, you will need to specify an axis so
that the resulting shape is consistent across replicas. So if the last
batch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you
would get a shape mismatch unless you specify `axis=0`. If you specify
`tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct
denominator of 6. Contrast this with computing `reduce_mean` to get a
scalar value on each replica and this function to average those means,
which will weigh some values `1/8` and others `1/4`.
For Example:
```
strategy = tf.distribute.experimental.CentralStorageStrategy(
compute_devices=['CPU:0', 'GPU:0'], parameter_device='CPU:0')
ds = tf.data.Dataset.range(10)
# Distribute that dataset
dist_dataset = strategy.experimental_distribute_dataset(ds)
with strategy.scope():
@tf.function
def train_step(val):
# pass through
return val
# Iterate over the distributed dataset
for x in dist_dataset:
result = strategy.run(train_step, args=(x,))
result = strategy.reduce(tf.distribute.ReduceOp.SUM, result,
axis=None).numpy()
# result: array([ 4, 6, 8, 10])
result = strategy.reduce(tf.distribute.ReduceOp.SUM, result, axis=0).numpy()
# result: 28
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by `run` to
be combined into a single tensor.
axis: Specifies the dimension to reduce along within each
replica's tensor. Should typically be set to the batch dimension, or
`None` to only reduce across replicas (e.g. if the tensor has no batch
dimension).
Returns:
A `Tensor`.
"""
return super(CentralStorageStrategy, self).reduce(reduce_op, value, axis)
@tf_export(v1=['distribute.experimental.CentralStorageStrategy']) # pylint: disable=missing-docstring
| CentralStorageStrategy |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/dist_optimizer_test.py | {
"start": 1235,
"end": 2719
} | class ____(optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
raise ValueError("Error creating optimizer.")
def step(self, closure=None):
raise NotImplementedError
def _call_method(method, obj_rref, *args, **kwargs):
return method(obj_rref.local_value(), *args, **kwargs)
def remote_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.remote on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
Returns a RRef to the remote method call result.
"""
return rpc.remote(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
def rpc_async_method(method, obj_rref, *args, **kwargs):
"""
Call rpc.rpc_async on a method in a remote object.
Args:
method: the method (for example, Class.method)
obj_rref (RRef): remote reference to the object
args: positional arguments to pass to the method
kwargs: keyword arguments to pass to the method
Returns a Future to the method call result.
"""
return rpc.rpc_async(
obj_rref.owner(),
_call_method,
args=[method, obj_rref] + list(args),
kwargs=kwargs,
)
| OptimizerFailingOnConstructor |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 38174,
"end": 38782
} | class ____(BaseReauthenticateView):
form_class = ReauthenticateForm
template_name = "account/reauthenticate." + app_settings.TEMPLATE_EXTENSION
def get_form_class(self):
return get_form_class(app_settings.FORMS, "reauthenticate", self.form_class)
def get_form_kwargs(self):
ret = super().get_form_kwargs()
ret["user"] = self.request.user
return ret
def form_valid(self, form):
flows.reauthentication.reauthenticate_by_password(self.request)
return super().form_valid(form)
reauthenticate = ReauthenticateView.as_view()
| ReauthenticateView |
python | pytorch__pytorch | test/inductor/test_segmented_tree.py | {
"start": 965,
"end": 9037
} | class ____(TestCase):
# Basic construction and initialization tests
def test_basic_construction(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
assert tree.summarize_range(0, 4) == 9
def test_empty_array(self):
with self.assertRaises(ValueError):
SegmentedTree([], add_op, max_op, 0)
# Property-based tests
@given(values=positive_integers)
def test_max_query_matches_naive(self, values):
tree = SegmentedTree(values, add_op, max_op, 0)
for start in range(len(values)):
for end in range(start, len(values)):
expected = naive_range_max(values, start, end)
actual = tree.summarize_range(start, end)
assert actual == expected, (
f"Range [{start}:{end}] expected {expected}, got {actual}"
)
@given(
values=positive_integers, range_indices=st.data(), update_value=update_values
)
def test_range_update(self, values, range_indices, update_value):
# Create a copy for naive implementation
naive_values = values.copy()
# Create segment tree
tree = SegmentedTree(values, add_op, max_op, 0)
# Get valid range indices
start, end = range_indices.draw(valid_range_indices(len(values)))
# Apply updates
tree.update_range(start, end, update_value)
naive_range_update(naive_values, start, end, update_value)
# Verify all possible ranges
for i in range(len(values)):
for j in range(i, len(values)):
expected = naive_range_max(naive_values, i, j)
actual = tree.summarize_range(i, j)
assert actual == expected, (
f"After update, range [{i}:{j}] expected {expected}, got {actual}"
)
@given(values=positive_integers, range_data=st.data())
def test_multiple_operations(self, values, range_data):
# Create a copy for naive implementation
naive_values = values.copy()
tree = SegmentedTree(values, add_op, max_op, 0)
# Perform multiple operations
num_operations = 5
for _ in range(num_operations):
# Randomly choose between query and update
operation_type = range_data.draw(st.sampled_from(["query", "update"]))
start, end = range_data.draw(valid_range_indices(len(values)))
if operation_type == "query":
expected = naive_range_max(naive_values, start, end)
actual = tree.summarize_range(start, end)
assert actual == expected, (
f"Range query [{start}:{end}] expected {expected}, got {actual}"
)
else: # update
update_value = range_data.draw(update_values)
tree.update_range(start, end, update_value)
naive_range_update(naive_values, start, end, update_value)
def test_single_element_ranges(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
for i in range(len(values)):
assert tree.summarize_range(i, i) == values[i], (
f"Single element range at index {i} failed"
)
def test_full_array_range(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
# Test querying the entire array
assert tree.summarize_range(0, len(values) - 1) == max(values)
# Update the entire array and test again
update_value = 10
tree.update_range(0, len(values) - 1, update_value)
expected = max([v + update_value for v in values])
assert tree.summarize_range(0, len(values) - 1) == expected
def test_boundary_conditions(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
# Test first element
assert tree.summarize_range(0, 0) == values[0]
# Test last element
assert tree.summarize_range(len(values) - 1, len(values) - 1) == values[-1]
# Test first two elements
assert tree.summarize_range(0, 1) == max(values[0:2])
# Test last two elements
assert tree.summarize_range(len(values) - 2, len(values) - 1) == max(
values[-2:]
)
def test_invalid_ranges(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
# Test start > end
with self.assertRaises(ValueError):
tree.summarize_range(3, 2)
with self.assertRaises(ValueError):
tree.update_range(4, 2, 10)
def test_out_of_bounds(self):
values = [1, 3, 5, 7, 9]
tree = SegmentedTree(values, add_op, max_op, 0)
# Test negative indices
with self.assertRaises(ValueError):
tree.summarize_range(-1, 3)
with self.assertRaises(ValueError):
tree.summarize_range(0, -1)
# Test indices >= n
with self.assertRaises(ValueError):
tree.summarize_range(0, len(values))
with self.assertRaises(ValueError):
tree.summarize_range(len(values), len(values) + 1)
# Test update with out of bounds indices
with self.assertRaises(ValueError):
tree.update_range(-1, 3, 10)
with self.assertRaises(ValueError):
tree.update_range(0, len(values), 10)
def test_overlapping_updates(self):
values = [1, 3, 5, 7, 9]
naive_values = values.copy()
tree = SegmentedTree(values, add_op, max_op, 0)
# Apply overlapping updates
tree.update_range(0, 2, 5) # Update [0, 1, 2]
naive_range_update(naive_values, 0, 2, 5)
tree.update_range(1, 3, 3) # Update [1, 2, 3]
naive_range_update(naive_values, 1, 3, 3)
# Verify all possible ranges
for i in range(len(values)):
for j in range(i, len(values)):
expected = naive_range_max(naive_values, i, j)
actual = tree.summarize_range(i, j)
assert actual == expected, (
f"After overlapping updates, range [{i}:{j}] expected {expected}, got {actual}"
)
def test_sequential_updates_and_queries(self):
values = [2, 4, 6, 8, 10, 12, 14]
naive_values = values.copy()
tree = SegmentedTree(values, add_op, max_op, 0)
# Sequence of operations
operations = [
("update", 1, 3, 5), # Update range [1, 2, 3] with +5
("query", 0, 4), # Query range [0, 1, 2, 3, 4]
("update", 2, 5, 3), # Update range [2, 3, 4, 5] with +3
("query", 1, 3), # Query range [1, 2, 3]
("update", 0, 6, 2), # Update entire array with +2
("query", 0, 6), # Query entire array
("query", 3, 5), # Query range [3, 4, 5]
]
for op in operations:
if op[0] == "update":
_, start, end, value = op
tree.update_range(start, end, value)
naive_range_update(naive_values, start, end, value)
# Verify tree state after update
for i in range(len(values)):
for j in range(i, len(values)):
expected = naive_range_max(naive_values, i, j)
actual = tree.summarize_range(i, j)
assert actual == expected, (
f"After update ({start}, {end}, {value}), query [{i}:{j}] expected {expected}, got {actual}"
)
else: # query
_, start, end = op
expected = naive_range_max(naive_values, start, end)
assert tree.summarize_range(start, end) == expected, (
f"Query [{start}:{end}] expected {expected}, got {tree.summarize_range(start, end)}"
)
if __name__ == "__main__":
run_tests()
| TestSegmentedTree |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/openai_functions/openapi.py | {
"start": 7510,
"end": 15412
} | class ____(Chain):
"""Chain for making a simple request to an API endpoint."""
request_method: Callable
"""Method to use for making the request."""
output_key: str = "response"
"""Key to use for the output of the request."""
input_key: str = "function"
"""Key to use for the input of the request."""
@property
@override
def input_keys(self) -> list[str]:
return [self.input_key]
@property
@override
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Run the logic of this chain and return the output."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
name = inputs[self.input_key].pop("name")
args = inputs[self.input_key].pop("arguments")
_pretty_name = get_colored_text(name, "green")
_pretty_args = get_colored_text(json.dumps(args, indent=2), "green")
_text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
_run_manager.on_text(_text)
api_response: Response = self.request_method(name, args)
if api_response.status_code != requests.codes.ok:
response = (
f"{api_response.status_code}: {api_response.reason}"
f"\nFor {name} "
f"Called with args: {args.get('params', '')}"
)
else:
try:
response = api_response.json()
except JSONDecodeError:
response = api_response.text
except Exception:
_logger.exception("Unexpected error parsing response as JSON")
response = api_response.text
return {self.output_key: response}
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
),
removal="1.0",
)
def get_openapi_chain(
spec: OpenAPISpec | str,
llm: BaseLanguageModel | None = None,
prompt: BasePromptTemplate | None = None,
request_chain: Chain | None = None,
llm_chain_kwargs: dict | None = None,
verbose: bool = False, # noqa: FBT001,FBT002
headers: dict | None = None,
params: dict | None = None,
**kwargs: Any,
) -> SequentialChain:
r"""Create a chain for querying an API from a OpenAPI spec.
Note: this class is deprecated. See below for a replacement implementation.
The benefits of this implementation are:
- Uses LLM tool calling features to encourage properly-formatted API requests;
- Includes async support.
```python
from typing import Any
from langchain_classic.chains.openai_functions.openapi import openapi_spec_to_openai_fn
from langchain_community.utilities.openapi import OpenAPISpec
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
# Define API spec. Can be JSON or YAML
api_spec = \"\"\"
{
"openapi": "3.1.0",
"info": {
"title": "JSONPlaceholder API",
"version": "1.0.0"
},
"servers": [
{
"url": "https://jsonplaceholder.typicode.com"
}
],
"paths": {
"/posts": {
"get": {
"summary": "Get posts",
"parameters": [
{
"name": "_limit",
"in": "query",
"required": false,
"schema": {
"type": "integer",
"example": 2
},
"description": "Limit the number of results"
}
]
}
}
}
}
\"\"\"
parsed_spec = OpenAPISpec.from_text(api_spec)
openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec)
tools = [
{"type": "function", "function": fn}
for fn in openai_fns
]
prompt = ChatPromptTemplate.from_template(
"Use the provided APIs to respond to this user query:\\n\\n{query}"
)
model = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools)
def _execute_tool(message) -> Any:
if tool_calls := message.tool_calls:
tool_call = message.tool_calls[0]
response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"])
response.raise_for_status()
return response.json()
else:
return message.content
chain = prompt | model | _execute_tool
```
```python
response = chain.invoke({"query": "Get me top two posts."})
```
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
params: Request parameters.
headers: Request headers.
verbose: Whether to run the chain in verbose mode.
llm_chain_kwargs: LLM chain additional keyword arguments.
**kwargs: Additional keyword arguments to pass to the chain.
""" # noqa: E501
try:
from langchain_community.utilities.openapi import OpenAPISpec
except ImportError as e:
msg = (
"Could not import langchain_community.utilities.openapi. "
"Please install it with `pip install langchain-community`."
)
raise ImportError(msg) from e
if isinstance(spec, str):
for conversion in (
OpenAPISpec.from_url,
OpenAPISpec.from_file,
OpenAPISpec.from_text,
):
try:
spec = conversion(spec)
break
except ImportError:
raise
except Exception: # noqa: BLE001
_logger.debug(
"Parse spec failed for OpenAPISpec.%s",
conversion.__name__,
exc_info=True,
)
if isinstance(spec, str):
msg = f"Unable to parse spec from source {spec}"
raise ValueError(msg) # noqa: TRY004
openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
if not llm:
msg = (
"Must provide an LLM for this chain.For example,\n"
"from langchain_openai import ChatOpenAI\n"
"model = ChatOpenAI()\n"
)
raise ValueError(msg)
prompt = prompt or ChatPromptTemplate.from_template(
"Use the provided API's to respond to this user query:\n\n{query}",
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs={"functions": openai_fns},
output_parser=JsonOutputFunctionsParser(args_only=False),
output_key="function",
verbose=verbose,
**(llm_chain_kwargs or {}),
)
request_chain = request_chain or SimpleRequestChain(
request_method=lambda name, args: call_api_fn(
name,
args,
headers=headers,
params=params,
),
verbose=verbose,
)
return SequentialChain(
chains=[llm_chain, request_chain],
input_variables=llm_chain.input_keys,
output_variables=["response"],
verbose=verbose,
**kwargs,
)
| SimpleRequestChain |
python | chroma-core__chroma | chromadb/test/configurations/test_collection_configuration.py | {
"start": 894,
"end": 1135
} | class ____(EmbeddingFunction[Embeddable]):
def __init__(self) -> None:
pass
def __call__(self, input: Embeddable) -> Embeddings:
return cast(Embeddings, np.array([[1.0, 2.0]], dtype=np.float32))
| LegacyEmbeddingFunction |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 198119,
"end": 201408
} | class ____:
@pytest.mark.parametrize(
("action", "method", "pretty_method"),
[
("added", "totp", "TOTP"),
("removed", "totp", "TOTP"),
("added", "webauthn", "WebAuthn"),
("removed", "webauthn", "WebAuthn"),
],
)
def test_two_factor_email(
self,
pyramid_request,
pyramid_config,
monkeypatch,
action,
method,
pretty_method,
):
stub_user = pretend.stub(
id="id",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
subject_renderer = pyramid_config.testing_add_renderer(
f"email/two-factor-{action}/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
f"email/two-factor-{action}/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
f"email/two-factor-{action}/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
send_method = getattr(email, f"send_two_factor_{action}_email")
result = send_method(pyramid_request, stub_user, method=method)
assert result == {"method": pretty_method, "username": stub_user.username}
subject_renderer.assert_()
body_renderer.assert_(method=pretty_method, username=stub_user.username)
html_renderer.assert_(method=pretty_method, username=stub_user.username)
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{stub_user.username} <{stub_user.email}>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": stub_user.email,
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
| TestTwoFactorEmail |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/resources/resources_v2.py | {
"start": 64,
"end": 734
} | class ____:
"""Hacker News Client that returns fake data."""
def __init__(self):
self.data = {
1: {
"id": 1,
"type": "comment",
"title": "the first comment",
"by": "user1",
},
2: {"id": 2, "type": "story", "title": "an awesome story", "by": "user2"},
}
def fetch_item_by_id(self, item_id: int) -> Optional[dict[str, Any]]:
return self.data.get(item_id)
def fetch_max_item_id(self) -> int:
return 2
@property
def item_field_names(self) -> list:
return ["id", "type", "title", "by"]
# end_mock
| StubHNClient |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 6595,
"end": 6950
} | class ____(PrefectException):
"""
Raised when the client receives a 403 (forbidden) from the API due to reaching an object limit (e.g. maximum number of deployments).
"""
def __init__(self, http_exc: Exception, *args: Any, **kwargs: Any) -> None:
self.http_exc = http_exc
super().__init__(*args, **kwargs)
| ObjectLimitReached |
python | pyca__cryptography | src/cryptography/hazmat/primitives/serialization/pkcs12.py | {
"start": 1147,
"end": 5104
} | class ____:
def __init__(
self,
key: PrivateKeyTypes | None,
cert: PKCS12Certificate | None,
additional_certs: list[PKCS12Certificate],
):
if key is not None and not isinstance(
key,
(
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
ed25519.Ed25519PrivateKey,
ed448.Ed448PrivateKey,
),
):
raise TypeError(
"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
" private key, or None."
)
if cert is not None and not isinstance(cert, PKCS12Certificate):
raise TypeError("cert must be a PKCS12Certificate object or None")
if not all(
isinstance(add_cert, PKCS12Certificate)
for add_cert in additional_certs
):
raise TypeError(
"all values in additional_certs must be PKCS12Certificate"
" objects"
)
self._key = key
self._cert = cert
self._additional_certs = additional_certs
@property
def key(self) -> PrivateKeyTypes | None:
return self._key
@property
def cert(self) -> PKCS12Certificate | None:
return self._cert
@property
def additional_certs(self) -> list[PKCS12Certificate]:
return self._additional_certs
def __eq__(self, other: object) -> bool:
if not isinstance(other, PKCS12KeyAndCertificates):
return NotImplemented
return (
self.key == other.key
and self.cert == other.cert
and self.additional_certs == other.additional_certs
)
def __hash__(self) -> int:
return hash((self.key, self.cert, tuple(self.additional_certs)))
def __repr__(self) -> str:
fmt = (
"<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>"
)
return fmt.format(self.key, self.cert, self.additional_certs)
load_key_and_certificates = rust_pkcs12.load_key_and_certificates
load_pkcs12 = rust_pkcs12.load_pkcs12
_PKCS12CATypes = typing.Union[
x509.Certificate,
PKCS12Certificate,
]
def serialize_java_truststore(
certs: Iterable[PKCS12Certificate],
encryption_algorithm: serialization.KeySerializationEncryption,
) -> bytes:
if not certs:
raise ValueError("You must supply at least one cert")
if not isinstance(
encryption_algorithm, serialization.KeySerializationEncryption
):
raise TypeError(
"Key encryption algorithm must be a "
"KeySerializationEncryption instance"
)
return rust_pkcs12.serialize_java_truststore(certs, encryption_algorithm)
def serialize_key_and_certificates(
name: bytes | None,
key: PKCS12PrivateKeyTypes | None,
cert: x509.Certificate | None,
cas: Iterable[_PKCS12CATypes] | None,
encryption_algorithm: serialization.KeySerializationEncryption,
) -> bytes:
if key is not None and not isinstance(
key,
(
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
ed25519.Ed25519PrivateKey,
ed448.Ed448PrivateKey,
),
):
raise TypeError(
"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
" private key, or None."
)
if not isinstance(
encryption_algorithm, serialization.KeySerializationEncryption
):
raise TypeError(
"Key encryption algorithm must be a "
"KeySerializationEncryption instance"
)
if key is None and cert is None and not cas:
raise ValueError("You must supply at least one of key, cert, or cas")
return rust_pkcs12.serialize_key_and_certificates(
name, key, cert, cas, encryption_algorithm
)
| PKCS12KeyAndCertificates |
python | doocs__leetcode | lcof2/剑指 Offer II 118. 多余的边/Solution.py | {
"start": 0,
"end": 371
} | class ____:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
p = list(range(1010))
for a, b in edges:
if find(a) == find(b):
return [a, b]
p[find(a)] = find(b)
return []
| Solution |
python | sympy__sympy | sympy/physics/quantum/state.py | {
"start": 7555,
"end": 9602
} | class ____(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
kind = KetKind
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product between this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_from_right_to(self, op, **options):
"""Apply an Operator to this Ket as Operator*Ket
This method will dispatch to methods having the format::
``def _apply_from_right_to_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how to implement OperatorName*Ket
Parameters
==========
op : Operator
The Operator that is acting on the Ket as op*Ket
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_from_right_to', op, **options)
| KetBase |
python | kubernetes-client__python | kubernetes/client/models/v1_api_group.py | {
"start": 383,
"end": 10450
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'preferred_version': 'V1GroupVersionForDiscovery',
'server_address_by_client_cid_rs': 'list[V1ServerAddressByClientCIDR]',
'versions': 'list[V1GroupVersionForDiscovery]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'preferred_version': 'preferredVersion',
'server_address_by_client_cid_rs': 'serverAddressByClientCIDRs',
'versions': 'versions'
}
def __init__(self, api_version=None, kind=None, name=None, preferred_version=None, server_address_by_client_cid_rs=None, versions=None, local_vars_configuration=None): # noqa: E501
"""V1APIGroup - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._name = None
self._preferred_version = None
self._server_address_by_client_cid_rs = None
self._versions = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.name = name
if preferred_version is not None:
self.preferred_version = preferred_version
if server_address_by_client_cid_rs is not None:
self.server_address_by_client_cid_rs = server_address_by_client_cid_rs
self.versions = versions
@property
def api_version(self):
"""Gets the api_version of this V1APIGroup. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1APIGroup. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1APIGroup.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1APIGroup. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1APIGroup. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1APIGroup. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1APIGroup.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1APIGroup. # noqa: E501
:type: str
"""
self._kind = kind
@property
def name(self):
"""Gets the name of this V1APIGroup. # noqa: E501
name is the name of the group. # noqa: E501
:return: The name of this V1APIGroup. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1APIGroup.
name is the name of the group. # noqa: E501
:param name: The name of this V1APIGroup. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def preferred_version(self):
"""Gets the preferred_version of this V1APIGroup. # noqa: E501
:return: The preferred_version of this V1APIGroup. # noqa: E501
:rtype: V1GroupVersionForDiscovery
"""
return self._preferred_version
@preferred_version.setter
def preferred_version(self, preferred_version):
"""Sets the preferred_version of this V1APIGroup.
:param preferred_version: The preferred_version of this V1APIGroup. # noqa: E501
:type: V1GroupVersionForDiscovery
"""
self._preferred_version = preferred_version
@property
def server_address_by_client_cid_rs(self):
"""Gets the server_address_by_client_cid_rs of this V1APIGroup. # noqa: E501
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. # noqa: E501
:return: The server_address_by_client_cid_rs of this V1APIGroup. # noqa: E501
:rtype: list[V1ServerAddressByClientCIDR]
"""
return self._server_address_by_client_cid_rs
@server_address_by_client_cid_rs.setter
def server_address_by_client_cid_rs(self, server_address_by_client_cid_rs):
"""Sets the server_address_by_client_cid_rs of this V1APIGroup.
a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. # noqa: E501
:param server_address_by_client_cid_rs: The server_address_by_client_cid_rs of this V1APIGroup. # noqa: E501
:type: list[V1ServerAddressByClientCIDR]
"""
self._server_address_by_client_cid_rs = server_address_by_client_cid_rs
@property
def versions(self):
"""Gets the versions of this V1APIGroup. # noqa: E501
versions are the versions supported in this group. # noqa: E501
:return: The versions of this V1APIGroup. # noqa: E501
:rtype: list[V1GroupVersionForDiscovery]
"""
return self._versions
@versions.setter
def versions(self, versions):
"""Sets the versions of this V1APIGroup.
versions are the versions supported in this group. # noqa: E501
:param versions: The versions of this V1APIGroup. # noqa: E501
:type: list[V1GroupVersionForDiscovery]
"""
if self.local_vars_configuration.client_side_validation and versions is None: # noqa: E501
raise ValueError("Invalid value for `versions`, must not be `None`") # noqa: E501
self._versions = versions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1APIGroup):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1APIGroup):
return True
return self.to_dict() != other.to_dict()
| V1APIGroup |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 40506,
"end": 41021
} | class ____:
def test_unbordered(self, unpadded_pt: PrettyTable) -> None:
unpadded_pt.border = False
result = unpadded_pt.get_string()
expected = """
abc
def
g..
"""
assert result.strip() == expected.strip()
def test_bordered(self, unpadded_pt: PrettyTable) -> None:
unpadded_pt.border = True
result = unpadded_pt.get_string()
expected = """
+-+-+-+
|a|b|c|
|d|e|f|
|g|.|.|
+-+-+-+
"""
assert result.strip() == expected.strip()
| TestUnpaddedTable |
python | ray-project__ray | python/ray/serve/_private/benchmarks/common.py | {
"start": 4767,
"end": 5018
} | class ____:
def __init__(self, child):
logging.getLogger("ray.serve").setLevel(logging.WARNING)
self._child = child
async def __call__(self, *args, **kwargs):
return await self._child.remote()
@serve.deployment
| ModelComp |
python | google__pytype | pytype/overlays/flax_overlay.py | {
"start": 2245,
"end": 3590
} | class ____(dataclass_overlay.Dataclass):
"""Dataclass with automatic 'name' and 'parent' members."""
def _add_implicit_field(self, node, cls_locals, key, typ):
if key in cls_locals:
self.ctx.errorlog.invalid_annotation(
self.ctx.vm.frames,
None,
name=key,
details=f"flax.linen.Module defines field '{key}' implicitly",
)
default = typ.to_variable(node)
cls_locals[key] = abstract_utils.Local(node, None, typ, default, self.ctx)
def get_class_locals(self, node, cls):
cls_locals = super().get_class_locals(node, cls)
initvar = self.ctx.convert.lookup_value("dataclasses", "InitVar")
def make_initvar(t):
return abstract.ParameterizedClass(
initvar, {abstract_utils.T: t}, self.ctx
)
name_type = make_initvar(self.ctx.convert.str_type)
# TODO(mdemello): Fill in the parent type properly
parent_type = make_initvar(self.ctx.convert.unsolvable)
self._add_implicit_field(node, cls_locals, "name", name_type)
self._add_implicit_field(node, cls_locals, "parent", parent_type)
return cls_locals
def decorate(self, node, cls):
super().decorate(node, cls)
if not isinstance(cls, abstract.InterpreterClass):
return
cls.members["replace"] = classgen.make_replace_method(self.ctx, node, cls)
| ModuleDataclass |
python | doocs__leetcode | solution/0000-0099/0058.Length of Last Word/Solution.py | {
"start": 0,
"end": 235
} | class ____:
def lengthOfLastWord(self, s: str) -> int:
i = len(s) - 1
while i >= 0 and s[i] == ' ':
i -= 1
j = i
while j >= 0 and s[j] != ' ':
j -= 1
return i - j
| Solution |
python | paramiko__paramiko | paramiko/kex_group14.py | {
"start": 1731,
"end": 1833
} | class ____(KexGroup14):
name = "diffie-hellman-group14-sha256"
hash_algo = sha256
| KexGroup14SHA256 |
python | mlflow__mlflow | .claude/hooks/lint.py | {
"start": 365,
"end": 560
} | class ____:
file: Path
line: int
column: int
message: str
def __str__(self) -> str:
return f"{self.file}:{self.line}:{self.column}: {self.message}"
@dataclass
| LintError |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_registration.py | {
"start": 6066,
"end": 12631
} | class ____:
"""Registry for ONNX functions.
The registry maintains a mapping from qualified names to symbolic functions under a
fixed opset version. It supports registering custom onnx-script functions and for
dispatcher to dispatch calls to the appropriate function.
"""
def __init__(self) -> None:
"""Initializes the registry"""
self._opset_version = _constants.TORCHLIB_OPSET
self.functions: dict[TorchOp | str, list[OnnxDecompMeta]] = {}
@property
def opset_version(self) -> int:
"""The ONNX opset version the exporter should target."""
return self._opset_version
@classmethod
def from_torchlib(cls, opset_version=_constants.TORCHLIB_OPSET) -> ONNXRegistry:
"""Populates the registry with ATen functions from torchlib.
Args:
torchlib_registry: The torchlib registry to use for populating the registry.
"""
registry = cls()
registry._opset_version = opset_version
for meta in _torchlib_registry.get_torchlib_ops():
registry._register(meta.fx_target, meta)
# TODO(justinchuby): Remove this once torchlib is migrated to PyTorch
torchlib_ops = onnxscript_apis.get_torchlib_ops()
for torchlib_meta in torchlib_ops:
qualified_name = torchlib_meta.qualified_name
overload_func = torchlib_meta.function
try:
# NOTE: This is heavily guarded with try-except because we don't want
# to fail the entire registry population if one function fails.
target = _get_overload(qualified_name)
if target is None:
continue
meta = OnnxDecompMeta(
onnx_function=overload_func,
fx_target=target,
signature=None,
is_custom=False,
is_complex=torchlib_meta.is_complex,
)
registry._register(target, meta)
except Exception:
logger.exception("Failed to register '%s'. Skipped", qualified_name)
continue
registry._cleanup_registry_based_on_opset_version()
return registry
def _register(
self,
target: TorchOp,
onnx_decomposition: OnnxDecompMeta,
) -> None:
"""Registers a OnnxDecompMeta to an operator.
Args:
target: The PyTorch node callable target.
onnx_decomposition: The OnnxDecompMeta to register.
"""
target_or_name: str | TorchOp
if isinstance(target, torch._ops.OpOverload):
# Get the qualified name of the aten op because torch._ops.OpOverload lookup in
# a dictionary is unreliable for some reason.
target_or_name = target.name()
else:
target_or_name = target
if onnx_decomposition.is_custom:
self.functions.setdefault(target_or_name, []).insert(0, onnx_decomposition)
else:
self.functions.setdefault(target_or_name, []).append(onnx_decomposition)
def register_op(
self,
target: TorchOp,
function: Callable,
is_complex: bool = False,
) -> None:
"""Registers a custom operator: torch.ops.<namespace>.<op_name>.<overload>.
Args:
target: The PyTorch node callable target.
function: The onnx-script function to register.
is_complex: Whether the function is a function that handles complex valued inputs.
"""
if isinstance(target, torch._ops.OpOverloadPacket):
raise TypeError(
f"Target '{target}' should be provided as an OpOverload instead of an "
"OpOverloadPacket. You can get the default overload with "
"<op>.default"
)
self._register(
target,
OnnxDecompMeta(
onnx_function=function,
fx_target=target,
signature=None,
is_custom=True,
is_complex=is_complex,
),
)
def get_decomps(self, target: TorchOp) -> list[OnnxDecompMeta]:
"""Returns a list of OnnxDecompMeta for the given op: torch.ops.<namespace>.<op_name>.<overload>.
The list is ordered by the time of registration. The custom operators should come
first in the list.
Args:
target: The PyTorch node callable target.
Returns:
A list of OnnxDecompMeta corresponding to the given name, or None if
the name is not in the registry.
"""
target_or_name: str | TorchOp
if isinstance(target, torch._ops.OpOverload):
# Get the qualified name of the aten op because torch._ops.OpOverload lookup in
# a dictionary is unreliable for some reason.
target_or_name = target.name()
else:
target_or_name = target
decomps = self.functions.get(target_or_name, [])
return sorted(decomps, key=lambda x: x.is_custom, reverse=True)
def is_registered(self, target: TorchOp) -> bool:
"""Returns whether the given op is registered: torch.ops.<namespace>.<op_name>.<overload>.
Args:
target: The PyTorch node callable target.
Returns:
True if the given op is registered, otherwise False.
"""
return bool(self.get_decomps(target))
def _cleanup_registry_based_on_opset_version(self) -> None:
"""Pick the implementation with the highest opset version valid until the current opset version."""
cleaned_functions = {}
for target_or_name, decomps in self.functions.items():
# Filter decompositions to only include those with opset_introduced <= opset_version
decomps = [d for d in decomps if d.opset_introduced <= self.opset_version]
# Keep only the decomposition with the highest opset_introduced
if decomps:
# Find the maximum opset_introduced
max_opset = max(d.opset_introduced for d in decomps)
# Keep all decompositions with the maximum opset_introduced
cleaned_functions[target_or_name] = [
d for d in decomps if d.opset_introduced == max_opset
]
self.functions = cleaned_functions
def __repr__(self) -> str:
return f"{self.__class__.__name__}(functions={self.functions})"
| ONNXRegistry |
python | graphql-python__graphene | graphene/types/json.py | {
"start": 123,
"end": 851
} | class ____(Scalar):
"""
Allows use of a JSON String for input / output from the GraphQL schema.
Use of this type is *not recommended* as you lose the benefits of having a defined, static
schema (one of the key benefits of GraphQL).
"""
@staticmethod
def serialize(dt):
return json.dumps(dt)
@staticmethod
def parse_literal(node, _variables=None):
if isinstance(node, StringValueNode):
try:
return json.loads(node.value)
except Exception as error:
raise ValueError(f"Badly formed JSONString: {str(error)}")
return Undefined
@staticmethod
def parse_value(value):
return json.loads(value)
| JSONString |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_json__property.py | {
"start": 1288,
"end": 2979
} | class ____:
def test_valid(self) -> None:
prop = bcpj.JSON()
assert prop.is_valid('[]')
assert prop.is_valid('[{"foo": 10}]')
def test_invalid(self) -> None:
prop = bcpj.JSON()
assert not prop.is_valid(None)
assert not prop.is_valid("")
assert not prop.is_valid("foo")
assert not prop.is_valid("[]]")
# json stickler for double quotes
assert not prop.is_valid("[{'foo': 10}]")
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
def test_has_ref(self) -> None:
prop = bcpj.JSON()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpj.JSON()
assert str(prop) == "JSON"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpj, ALL)
| Test_JSON |
python | ray-project__ray | rllib/algorithms/dreamerv3/torch/models/components/reward_predictor_layer.py | {
"start": 407,
"end": 4482
} | class ____(nn.Module):
"""A layer outputting reward predictions using K bins and two-hot encoding.
This layer is used in two models in DreamerV3: The reward predictor of the world
model and the value function. K is 255 by default (see [1]) and doesn't change
with the model size.
Possible predicted reward/values range from symexp(-20.0) to symexp(20.0), which
should cover any possible environment. Outputs of this layer are generated by
generating logits/probs via a single linear layer, then interpreting the probs
as weights for a weighted average of the different possible reward (binned) values.
"""
def __init__(
self,
*,
input_size: int,
num_buckets: int = 255,
lower_bound: float = -20.0,
upper_bound: float = 20.0,
):
"""Initializes a RewardPredictorLayer instance.
Args:
input_size: The input size of the reward predictor layer.
num_buckets: The number of buckets to create. Note that the number of
possible symlog'd outcomes from the used distribution is
`num_buckets` + 1:
lower_bound --bucket-- o[1] --bucket-- o[2] ... --bucket-- upper_bound
o=outcomes
lower_bound=o[0]
upper_bound=o[num_buckets]
lower_bound: The symlog'd lower bound for a possible reward value.
Note that a value of -20.0 here already allows individual (actual env)
rewards to be as low as -400M. Buckets will be created between
`lower_bound` and `upper_bound`.
upper_bound: The symlog'd upper bound for a possible reward value.
Note that a value of +20.0 here already allows individual (actual env)
rewards to be as high as 400M. Buckets will be created between
`lower_bound` and `upper_bound`.
"""
self.num_buckets = num_buckets
super().__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.reward_buckets_layer = nn.Linear(
in_features=input_size, out_features=self.num_buckets, bias=True
)
nn.init.zeros_(self.reward_buckets_layer.weight)
nn.init.zeros_(self.reward_buckets_layer.bias)
# self.reward_buckets_layer.weight.data.fill_(0.0)
# self.reward_buckets_layer.bias.data.fill_(0.0)
def forward(self, inputs, return_logits=False):
"""Computes the expected reward using N equal sized buckets of possible values.
Args:
inputs: The input tensor for the layer, which computes the reward bucket
weights (logits). [B, dim].
return_logits: Whether to return the logits over the reward buckets
as a second return value (besides the expected reward).
Returns:
The expected reward OR a tuple consisting of the expected reward and the
torch `FiniteDiscrete` distribution object. To get the individual bucket
probs, do `[FiniteDiscrete object].probs`.
"""
# Compute the `num_buckets` weights.
logits = self.reward_buckets_layer(inputs)
# Compute the expected(!) reward using the formula:
# `softmax(Linear(x))` [vectordot] `possible_outcomes`, where
# `possible_outcomes` is the even-spaced (binned) encoding of all possible
# symexp'd reward/values.
probs = F.softmax(logits, dim=-1)
possible_outcomes = torch.linspace(
self.lower_bound, self.upper_bound, self.num_buckets, device=logits.device
)
# probs=possible_outcomes=[B, `num_buckets`]
# Simple vector dot product (over last dim) to get the mean reward
# weighted sum, where all weights sum to 1.0.
expected_rewards = torch.sum(probs * possible_outcomes, dim=-1)
# expected_rewards=[B]
if return_logits:
return expected_rewards, logits
return expected_rewards
| RewardPredictorLayer |
python | getsentry__sentry | tests/sentry/monitors/test_validators.py | {
"start": 1293,
"end": 12437
} | class ____(MonitorTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.request = RequestFactory().get("/")
self.request.user = self.user
access = MagicMock()
access.has_any_project_scope.return_value = True
self.request.access = access
self.context = {
"organization": self.organization,
"access": access,
"request": self.request,
}
@patch("sentry.analytics.record")
def test_simple(self, mock_record):
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"owner": f"user:{self.user.id}",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.organization_id == self.organization.id
assert monitor.project_id == self.project.id
assert monitor.name == "My Monitor"
assert monitor.status == ObjectStatus.ACTIVE
assert monitor.owner_user_id == self.user.id
assert monitor.owner_team_id is None
assert monitor.config == {
"schedule_type": ScheduleType.CRONTAB,
"schedule": "0 0 * * *",
"checkin_margin": None,
"max_runtime": None,
"failure_issue_threshold": None,
"recovery_threshold": None,
}
assert_any_analytics_event(
mock_record,
CronMonitorCreated(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=self.project.id,
from_upsert=False,
),
)
assert_any_analytics_event(
mock_record,
FirstCronMonitorCreated(
user_id=self.user.id,
organization_id=self.organization.id,
project_id=self.project.id,
from_upsert=False,
),
)
def test_slug(self):
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "my-monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.slug == "my-monitor"
def test_invalid_numeric_slug(self):
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert "slug" in validator.errors
assert validator.errors["slug"][0] == DEFAULT_SLUG_ERROR_MESSAGE
def test_generated_slug_not_entirely_numeric(self):
data = {
"project": self.project.slug,
"name": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.slug.startswith("1234-")
assert not monitor.slug.isdecimal()
def test_crontab_whitespace(self):
data = {
"project": self.project.slug,
"name": "1234",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": " *\t* * * * "},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.config["schedule"] == "* * * * *"
@override_settings(MAX_MONITORS_PER_ORG=2)
def test_monitor_organization_limit(self):
for i in range(settings.MAX_MONITORS_PER_ORG):
Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name=f"Unicron-{i}",
slug=f"unicron-{i}",
config={
"schedule_type": ScheduleType.CRONTAB,
"schedule": "0 0 * * *",
},
)
data = {
"project": self.project.slug,
"name": f"Unicron-{settings.MAX_MONITORS_PER_ORG + 1}",
"slug": f"unicron-{settings.MAX_MONITORS_PER_ORG + 1}",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert validator.errors["nonFieldErrors"] == [
ErrorDetail(
f"You may not exceed {settings.MAX_MONITORS_PER_ORG} monitors per organization",
code="invalid",
)
]
def test_simple_with_alert_rule(self):
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
"alert_rule": {
"environment": self.environment.name,
"targets": [{"targetIdentifier": self.user.id, "targetType": "Member"}],
},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
alert_rule_id = monitor.config["alert_rule_id"]
rule = Rule.objects.get(
project_id=monitor.project_id, id=alert_rule_id, source=RuleSource.CRON_MONITOR
)
assert rule is not None
assert rule.environment_id == self.environment.id
def test_checkin_margin_zero(self):
# Invalid checkin margin
#
# XXX(epurkhiser): We currently transform 0 -> 1 for backwards
# compatability. If we remove the custom transformer in the config
# validator this test will change to a validation error test.
data = {
"project": self.project.slug,
"name": "My Monitor",
"slug": "cron_job",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily", "checkin_margin": 0},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.config["checkin_margin"] == 1
@patch("sentry.quotas.backend.assign_seat")
def test_create_monitor_assigns_seat(self, assign_seat):
assign_seat.return_value = Outcome.ACCEPTED
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assign_seat.assert_called_with(DataCategory.MONITOR_SEAT, monitor)
assert monitor.status == ObjectStatus.ACTIVE
@patch("sentry.quotas.backend.assign_seat")
def test_create_monitor_without_seat(self, assign_seat):
assign_seat.return_value = Outcome.RATE_LIMITED
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert assign_seat.called
monitor.refresh_from_db()
assert monitor.status == ObjectStatus.DISABLED
# Verify the detector is also disabled when quota is exceeded
detector = get_detector_for_monitor(monitor)
assert detector is not None
assert detector.enabled is False
def test_invalid_schedule(self):
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
# There is no February 31st
"config": {"schedule_type": "crontab", "schedule": "0 0 31 2 *"},
}
validator = MonitorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert "config" in validator.errors
assert "schedule" in validator.errors["config"]
assert validator.errors["config"]["schedule"][0] == "Schedule is invalid"
def test_create_with_owner_team(self):
"""Test creating a monitor with a team owner."""
team = self.create_team(organization=self.organization)
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"owner": f"team:{team.id}",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.owner_user_id is None
assert monitor.owner_team_id == team.id
def test_create_with_status_disabled(self):
"""Test creating a monitor with disabled status."""
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"status": "disabled",
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
assert monitor.status == ObjectStatus.DISABLED
def test_create_with_is_muted_noop(self):
"""Test that creating a monitor with is_muted does nothing.
Since is_muted is computed from MonitorEnvironment.is_muted, setting is_muted=True
during monitor creation has no effect because there are no environments yet.
A monitor with no environments is always considered unmuted.
To mute a monitor, you must use the update API after the monitor has environments.
"""
data = {
"project": self.project.slug,
"name": "My Monitor",
"type": "cron_job",
"isMuted": True, # This has no effect on creation
"config": {"schedule_type": "crontab", "schedule": "@daily"},
}
validator = MonitorValidator(data=data, context=self.context)
assert validator.is_valid()
monitor = validator.save()
# Monitor has no environments, so is_muted returns False regardless of input
assert is_monitor_muted(monitor) is False
| MonitorValidatorCreateTest |
python | sqlalchemy__sqlalchemy | test/engine/test_transaction.py | {
"start": 39639,
"end": 51683
} | class ____(fixtures.TestBase):
"""see also sqlalchemy/testing/suite/test_dialect.py::IsolationLevelTest
this suite has sparse_backend so wont take place
for every dbdriver under a nox run. the suite test should cover
that end of it
"""
__requires__ = ("isolation_level",)
__sparse_driver_backend__ = True
def _default_isolation_level(self):
return testing.requires.get_isolation_levels(testing.config)["default"]
def _non_default_isolation_level(self):
levels = testing.requires.get_isolation_levels(testing.config)
default = levels["default"]
supported = levels["supported"]
s = set(supported).difference(["AUTOCOMMIT", default])
if s:
return s.pop()
else:
assert False, "no non-default isolation level available"
def test_engine_param_stays(self):
eng = testing_engine()
with eng.connect() as conn:
isolation_level = eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
)
level = self._non_default_isolation_level()
ne_(isolation_level, level)
eng = testing_engine(options=dict(isolation_level=level))
with eng.connect() as conn:
eq_(
eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
),
level,
)
# check that it stays
with eng.connect() as conn:
eq_(
eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
),
level,
)
with eng.connect() as conn:
eq_(
eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
),
level,
)
def test_default_level(self):
eng = testing_engine(options=dict())
with eng.connect() as conn:
isolation_level = eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
)
eq_(isolation_level, self._default_isolation_level())
def test_reset_level(self):
eng = testing_engine(options=dict())
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._default_isolation_level(),
)
eng.dialect.set_isolation_level(
conn.connection.dbapi_connection,
self._non_default_isolation_level(),
)
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._non_default_isolation_level(),
)
eng.dialect.reset_isolation_level(conn.connection.dbapi_connection)
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._default_isolation_level(),
)
conn.close()
def test_reset_level_with_setting(self):
eng = testing_engine(
options=dict(isolation_level=self._non_default_isolation_level())
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._non_default_isolation_level(),
)
eng.dialect.set_isolation_level(
conn.connection.dbapi_connection, self._default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._default_isolation_level(),
)
eng.dialect.reset_isolation_level(conn.connection.dbapi_connection)
eq_(
eng.dialect.get_isolation_level(conn.connection.dbapi_connection),
self._non_default_isolation_level(),
)
conn.close()
def test_underscore_replacement(self, connection_no_trans):
conn = connection_no_trans
with (
mock.patch.object(conn.dialect, "set_isolation_level") as mock_sil,
mock.patch.object(
conn.dialect,
"_gen_allowed_isolation_levels",
mock.Mock(return_value=["READ COMMITTED", "REPEATABLE READ"]),
),
):
conn.execution_options(isolation_level="REPEATABLE_READ")
dbapi_conn = conn.connection.dbapi_connection
eq_(mock_sil.mock_calls, [mock.call(dbapi_conn, "REPEATABLE READ")])
def test_casing_replacement(self, connection_no_trans):
conn = connection_no_trans
with (
mock.patch.object(conn.dialect, "set_isolation_level") as mock_sil,
mock.patch.object(
conn.dialect,
"_gen_allowed_isolation_levels",
mock.Mock(return_value=["READ COMMITTED", "REPEATABLE READ"]),
),
):
conn.execution_options(isolation_level="repeatable_read")
dbapi_conn = conn.connection.dbapi_connection
eq_(mock_sil.mock_calls, [mock.call(dbapi_conn, "REPEATABLE READ")])
def test_dialect_doesnt_follow_naming_guidelines(
self, connection_no_trans
):
conn = connection_no_trans
conn.dialect.__dict__.pop("_gen_allowed_isolation_levels", None)
with mock.patch.object(
conn.dialect,
"get_isolation_level_values",
mock.Mock(
return_value=[
"READ COMMITTED",
"REPEATABLE_READ",
"serializable",
]
),
):
with expect_raises_message(
ValueError,
f"Dialect {conn.dialect.name!r} "
r"get_isolation_level_values\(\) method "
r"should "
r"return names as UPPERCASE using spaces, not underscores; "
r"got \['REPEATABLE_READ', 'serializable'\]",
):
conn.execution_options(isolation_level="READ COMMITTED")
def test_invalid_level_engine_param(self):
eng = testing_engine(options=dict(isolation_level="FOO"))
assert_raises_message(
exc.ArgumentError,
f"Invalid value 'FOO' for isolation_level. "
f"Valid isolation levels for {eng.dialect.name!r} are "
f"""{', '.join(
testing.requires.get_isolation_levels(
testing.config
)['supported']
)}""",
eng.connect,
)
def test_invalid_level_execution_option(self):
eng = testing_engine(
options=dict(execution_options=dict(isolation_level="FOO"))
)
assert_raises_message(
exc.ArgumentError,
f"Invalid value 'FOO' for isolation_level. "
f"Valid isolation levels for {eng.dialect.name!r} are "
f"""{', '.join(
testing.requires.get_isolation_levels(
testing.config
)['supported']
)}""",
eng.connect,
)
def test_connection_invalidated(self):
eng = testing_engine()
with eng.connect() as conn:
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2.invalidate()
c2.connection
# TODO: do we want to rebuild the previous isolation?
# for now, this is current behavior so we will leave it.
eq_(c2.get_isolation_level(), self._default_isolation_level())
def test_per_connection(self):
eng = testing_engine(
options=dict(
poolclass=(
QueuePool
if not testing.db.dialect.is_async
else AsyncAdaptedQueuePool
),
pool_size=2,
max_overflow=0,
)
)
c1 = eng.connect()
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c1.connection.dbapi_connection),
self._non_default_isolation_level(),
)
eq_(
eng.dialect.get_isolation_level(c2.connection.dbapi_connection),
self._default_isolation_level(),
)
c1.close()
c2.close()
c3 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c3.connection.dbapi_connection),
self._default_isolation_level(),
)
c4 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c4.connection.dbapi_connection),
self._default_isolation_level(),
)
c3.close()
c4.close()
def test_exception_in_transaction(self):
eng = testing_engine()
with eng.connect() as c1:
with expect_raises_message(
exc.InvalidRequestError,
r"This connection has already initialized a SQLAlchemy "
r"Transaction\(\) object via begin\(\) or autobegin; "
r"isolation_level may not be altered unless rollback\(\) or "
r"commit\(\) is called first.",
):
with c1.begin():
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
# was never set, so we are on original value
eq_(
eng.dialect.get_isolation_level(
c1.connection.dbapi_connection
),
self._default_isolation_level(),
)
def test_per_statement_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
select(1).execution_options,
isolation_level=self._non_default_isolation_level(),
)
def test_per_engine(self):
# new in 0.9
eng = testing_engine(
testing.db.url,
options=dict(
execution_options={
"isolation_level": self._non_default_isolation_level()
}
),
)
with eng.connect() as conn:
eq_(
eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
),
self._non_default_isolation_level(),
)
def test_per_option_engine(self):
eng = testing_engine(testing.db.url).execution_options(
isolation_level=self._non_default_isolation_level()
)
with eng.connect() as conn:
eq_(
eng.dialect.get_isolation_level(
conn.connection.dbapi_connection
),
self._non_default_isolation_level(),
)
def test_isolation_level_accessors_connection_default(self):
eng = testing_engine(testing.db.url)
with eng.connect() as conn:
eq_(conn.default_isolation_level, self._default_isolation_level())
with eng.connect() as conn:
eq_(conn.get_isolation_level(), self._default_isolation_level())
def test_isolation_level_accessors_connection_option_modified(self):
eng = testing_engine(testing.db.url)
with eng.connect() as conn:
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level()
)
eq_(conn.default_isolation_level, self._default_isolation_level())
eq_(
conn.get_isolation_level(), self._non_default_isolation_level()
)
eq_(c2.get_isolation_level(), self._non_default_isolation_level())
| IsolationLevelTest |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 16035,
"end": 16111
} | class ____(TestGetInterrupt):
kind = queue.Channel
| TestGetInterruptChannel |
python | django__django | tests/files/tests.py | {
"start": 10522,
"end": 12160
} | class ____(unittest.TestCase):
"""
get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
get_image_dimensions() called with a filename should closed the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper:
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(
os.path.join(os.path.dirname(__file__), "test1.png")
)
finally:
del images.open
self.assertTrue(FileWrapper._closed)
| DimensionClosingBug |
python | joke2k__faker | tests/providers/test_job.py | {
"start": 2833,
"end": 3022
} | class ____:
"""Test fr_FR job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
assert faker.job() in FrFrJobProvider.jobs
| TestFrFr |
python | mitmproxy__pdoc | pdoc/__init__.py | {
"start": 10568,
"end": 10635
} | class ____(BaseModel):
a: int
"""Docs for field a."""
| OtherFoo |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {
"start": 4879,
"end": 5283
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin", "project:releases", "org:ci"],
"POST": ["project:write", "project:admin", "project:releases", "org:ci"],
"PUT": ["project:write", "project:admin", "project:releases", "org:ci"],
"DELETE": ["project:admin", "project:releases"],
}
| OrganizationReleasePermission |
python | mlflow__mlflow | mlflow/genai/scorers/base.py | {
"start": 1575,
"end": 1843
} | class ____:
"""Configuration for registered scorer sampling."""
sample_rate: float | None = None
filter_string: str | None = None
AggregationFunc = Callable[[list[float]], float] # List of per-row value -> aggregated value
@dataclass
| ScorerSamplingConfig |
python | django__django | django/core/serializers/pyyaml.py | {
"start": 594,
"end": 1245
} | class ____(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar("tag:yaml.org,2002:str", str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping("tag:yaml.org,2002:map", data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(
collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict
)
# Workaround to represent dictionaries in insertion order.
# See https://github.com/yaml/pyyaml/pull/143.
DjangoSafeDumper.add_representer(dict, DjangoSafeDumper.represent_ordered_dict)
| DjangoSafeDumper |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias18.py | {
"start": 334,
"end": 512
} | class ____(Generic[T1]):
pass
A_Alias_1: TypeAlias = A[T2]
A_Alias_2: TypeAlias = A_Alias_1[T2 | int]
# This should generate an error because the variance is incompatible.
| A |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header03.py | {
"start": 315,
"end": 993
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header03.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_footer(
"&L&P", {"scale_with_doc": False, "align_with_margins": False}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | jazzband__tablib | src/tablib/formats/_rst.py | {
"start": 633,
"end": 9226
} | class ____:
title = 'rst'
extensions = ('rst',)
MAX_TABLE_WIDTH = 80 # Roughly. It may be wider to avoid breaking words.
@classmethod
def _get_column_string_lengths(cls, dataset):
"""
Returns a list of string lengths of each column, and a list of
maximum word lengths.
"""
if dataset.headers:
column_lengths = [[len(h)] for h in dataset.headers]
word_lens = [_max_word_len(h) for h in dataset.headers]
else:
column_lengths = [[] for _ in range(dataset.width)]
word_lens = [0 for _ in range(dataset.width)]
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
for i, val in enumerate(values):
text = to_str(val)
column_lengths[i].append(len(text))
word_lens[i] = max(word_lens[i], _max_word_len(text))
return column_lengths, word_lens
@classmethod
def _row_to_lines(cls, values, widths, wrapper, sep='|', justify=JUSTIFY_LEFT):
"""
Returns a table row of wrapped values as a list of lines
"""
if justify not in JUSTIFY_VALUES:
raise ValueError('Value of "justify" must be one of "{}"'.format(
'", "'.join(JUSTIFY_VALUES)
))
def just(text_, width_):
if justify == JUSTIFY_LEFT:
return text_.ljust(width_)
elif justify == JUSTIFY_CENTER:
return text_.center(width_)
else:
return text_.rjust(width_)
lpad = sep + ' ' if sep else ''
rpad = ' ' + sep if sep else ''
pad = ' ' + sep + ' '
cells = []
for value, width in zip(values, widths):
wrapper.width = width
text = to_str(value)
cell = wrapper.wrap(text)
cells.append(cell)
lines = zip_longest(*cells, fillvalue='')
lines = (
(just(cell_line, widths[i]) for i, cell_line in enumerate(line))
for line in lines
)
lines = [''.join((lpad, pad.join(line), rpad)) for line in lines]
return lines
@classmethod
def _get_column_widths(cls, dataset, max_table_width=MAX_TABLE_WIDTH, pad_len=3):
"""
Returns a list of column widths proportional to the median length
of the text in their cells.
"""
str_lens, word_lens = cls._get_column_string_lengths(dataset)
median_lens = [int(median(lens)) for lens in str_lens]
total = sum(median_lens)
if total > max_table_width - (pad_len * len(median_lens)):
column_widths = (max_table_width * lens // total for lens in median_lens)
else:
column_widths = (lens for lens in median_lens)
# Allow for separator and padding:
column_widths = (w - pad_len if w > pad_len else w for w in column_widths)
# Rather widen table than break words:
column_widths = [max(w, l) for w, l in zip(column_widths, word_lens)]
return column_widths
@classmethod
def export_set_as_simple_table(cls, dataset, column_widths=None):
"""
Returns reStructuredText grid table representation of dataset.
"""
lines = []
wrapper = TextWrapper()
if column_widths is None:
column_widths = cls._get_column_widths(dataset, pad_len=2)
border = ' '.join(['=' * w for w in column_widths])
lines.append(border)
if dataset.headers:
lines.extend(cls._row_to_lines(
dataset.headers,
column_widths,
wrapper,
sep='',
justify=JUSTIFY_CENTER,
))
lines.append(border)
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
lines.extend(cls._row_to_lines(values, column_widths, wrapper, ''))
lines.append(border)
return '\n'.join(lines)
@classmethod
def export_set_as_grid_table(cls, dataset, column_widths=None):
"""
Returns reStructuredText grid table representation of dataset.
>>> from tablib import Dataset
>>> from tablib.formats import registry
>>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
>>> data = Dataset()
>>> data.headers = ['A', 'B', 'A and B']
>>> for a, b in bits:
... data.append([bool(a), bool(b), bool(a * b)])
>>> rst = registry.get_format('rst')
>>> print(rst.export_set(data, force_grid=True))
+-------+-------+-------+
| A | B | A and |
| | | B |
+=======+=======+=======+
| False | False | False |
+-------+-------+-------+
| True | False | False |
+-------+-------+-------+
| False | True | False |
+-------+-------+-------+
| True | True | True |
+-------+-------+-------+
"""
lines = []
wrapper = TextWrapper()
if column_widths is None:
column_widths = cls._get_column_widths(dataset)
header_sep = '+=' + '=+='.join(['=' * w for w in column_widths]) + '=+'
row_sep = '+-' + '-+-'.join(['-' * w for w in column_widths]) + '-+'
lines.append(row_sep)
if dataset.headers:
lines.extend(cls._row_to_lines(
dataset.headers,
column_widths,
wrapper,
justify=JUSTIFY_CENTER,
))
lines.append(header_sep)
for row in dataset.dict:
values = iter(row.values() if hasattr(row, 'values') else row)
lines.extend(cls._row_to_lines(values, column_widths, wrapper))
lines.append(row_sep)
return '\n'.join(lines)
@classmethod
def _use_simple_table(cls, head0, col0, width0):
"""
Use a simple table if the text in the first column is never wrapped
>>> from tablib.formats import registry
>>> rst = registry.get_format('rst')
>>> rst._use_simple_table('menu', ['egg', 'bacon'], 10)
True
>>> rst._use_simple_table(None, ['lobster thermidor', 'spam'], 10)
False
"""
if head0 is not None:
head0 = to_str(head0)
if len(head0) > width0:
return False
for cell in col0:
cell = to_str(cell)
if len(cell) > width0:
return False
return True
@classmethod
def export_set(cls, dataset, **kwargs):
"""
Returns reStructuredText table representation of dataset.
Returns a simple table if the text in the first column is never
wrapped, otherwise returns a grid table.
>>> from tablib import Dataset
>>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
>>> data = Dataset()
>>> data.headers = ['A', 'B', 'A and B']
>>> for a, b in bits:
... data.append([bool(a), bool(b), bool(a * b)])
>>> table = data.rst
>>> table.split('\\n') == [
... '===== ===== =====',
... ' A B A and',
... ' B ',
... '===== ===== =====',
... 'False False False',
... 'True False False',
... 'False True False',
... 'True True True ',
... '===== ===== =====',
... ]
True
"""
if not dataset.dict:
return ''
force_grid = kwargs.get('force_grid', False)
max_table_width = kwargs.get('max_table_width', cls.MAX_TABLE_WIDTH)
column_widths = cls._get_column_widths(dataset, max_table_width)
use_simple_table = cls._use_simple_table(
dataset.headers[0] if dataset.headers else None,
dataset.get_col(0),
column_widths[0],
)
if use_simple_table and not force_grid:
return cls.export_set_as_simple_table(dataset, column_widths)
else:
return cls.export_set_as_grid_table(dataset, column_widths)
@classmethod
def export_book(cls, databook):
"""
reStructuredText representation of a Databook.
Tables are separated by a blank line. All tables use the grid
format.
"""
return '\n\n'.join(cls.export_set(dataset, force_grid=True)
for dataset in databook._datasets)
| ReSTFormat |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr.py | {
"start": 32588,
"end": 37329
} | class ____(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, config: RTDetrConfig, num_heads: int, n_points: int):
super().__init__()
self.attn = MultiScaleDeformableAttention()
if config.d_model % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
)
dim_per_head = config.d_model // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"You'd better set embed_dim (d_model) in RTDetrMultiscaleDeformableAttention to make the"
" dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
" implementation."
)
self.im2col_step = 64
self.d_model = config.d_model
self.n_levels = config.num_feature_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.disable_custom_kernels = config.disable_custom_kernels
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
total_elements = sum(height * width for height, width in spatial_shapes_list)
if total_elements != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
# we invert the attention_mask
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = F.softmax(attention_weights, -1).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
)
# batch_size, num_queries, n_heads, n_levels, n_points, 2
num_coordinates = reference_points.shape[-1]
if num_coordinates == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif num_coordinates == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
)
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
output = self.attn(
value,
spatial_shapes,
spatial_shapes_list,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
output = self.output_proj(output)
return output, attention_weights
| RTDetrMultiscaleDeformableAttention |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_florida_zip.py | {
"start": 1743,
"end": 4078
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Florida zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_florida_zip": ["32807", "33039", "33325", "34134"],
"invalid_florida_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_florida_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_florida_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_florida_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidFloridaZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidFloridaZip |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/legacy_storage.py | {
"start": 14755,
"end": 31384
} | class ____(EventLogStorage, ConfigurableClass):
def __init__(self, storage: DagsterStorage, inst_data: Optional[ConfigurableClassData] = None):
self._storage = check.inst_param(storage, "storage", DagsterStorage)
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
super().__init__()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return {
"module_name": str,
"class_name": str,
"config_yaml": str,
}
@classmethod
def from_config_value(
cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, str]
) -> "LegacyEventLogStorage":
storage = ConfigurableClassData(
module_name=config_value["module_name"],
class_name=config_value["class_name"],
config_yaml=config_value["config_yaml"],
).rehydrate(as_type=DagsterStorage)
# Type checker says LegacyEventStorage is abstract and can't be instantiated. Not sure whether
# type check is wrong, or is unused code path.
return LegacyEventLogStorage(storage, inst_data=inst_data)
@property
def _instance(self) -> Optional["DagsterInstance"]: # pyright: ignore[reportIncompatibleMethodOverride]
return self._storage._instance # noqa: SLF001
def index_connection(self):
return self._storage.event_log_storage.index_connection() # pyright: ignore[reportAttributeAccessIssue]
def register_instance(self, instance: "DagsterInstance") -> None:
if not self._storage.has_instance:
self._storage.register_instance(instance)
def get_logs_for_run( # pyright: ignore[reportIncompatibleMethodOverride]
self,
run_id: str,
cursor: Optional[Union[str, int]] = None,
of_type: Optional[Union["DagsterEventType", set["DagsterEventType"]]] = None,
limit: Optional[int] = None,
ascending: bool = True,
) -> Iterable["EventLogEntry"]:
return self._storage.event_log_storage.get_logs_for_run(
run_id, cursor, of_type, limit, ascending
)
def get_logs_for_all_runs_by_log_id(
self,
after_cursor: int = -1,
dagster_event_type: Optional[Union["DagsterEventType", set["DagsterEventType"]]] = None,
limit: Optional[int] = None,
) -> Mapping[int, "EventLogEntry"]:
return self._storage.event_log_storage.get_logs_for_all_runs_by_log_id(
after_cursor=after_cursor, dagster_event_type=dagster_event_type, limit=limit
)
def get_maximum_record_id(self) -> Optional[int]:
return self._storage.event_log_storage.get_maximum_record_id()
def get_stats_for_run(self, run_id: str) -> "DagsterRunStatsSnapshot":
return self._storage.event_log_storage.get_stats_for_run(run_id)
def get_step_stats_for_run(
self, run_id: str, step_keys: Optional[Sequence[str]] = None
) -> Sequence["RunStepKeyStatsSnapshot"]:
return self._storage.event_log_storage.get_step_stats_for_run(run_id, step_keys)
def store_event(self, event: "EventLogEntry") -> None:
return self._storage.event_log_storage.store_event(event)
def delete_events(self, run_id: str) -> None:
return self._storage.event_log_storage.delete_events(run_id)
def upgrade(self) -> None:
return self._storage.event_log_storage.upgrade()
def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:
return self._storage.event_log_storage.reindex_events(print_fn, force)
def reindex_assets(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:
return self._storage.event_log_storage.reindex_assets(print_fn, force)
def wipe(self) -> None:
return self._storage.event_log_storage.wipe()
def watch(self, run_id: str, cursor: str, callback: EventHandlerFn) -> None: # pyright: ignore[reportIncompatibleMethodOverride]
return self._storage.event_log_storage.watch(run_id, cursor, callback)
def end_watch(self, run_id: str, handler: EventHandlerFn) -> None:
return self._storage.event_log_storage.end_watch(run_id, handler)
@property
def is_persistent(self) -> bool:
return self._storage.event_log_storage.is_persistent
def dispose(self) -> None:
return self._storage.event_log_storage.dispose()
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
return self._storage.event_log_storage.optimize_for_webserver(
statement_timeout,
pool_recycle,
max_overflow,
)
def get_event_records( # pyright: ignore[reportIncompatibleMethodOverride]
self,
event_records_filter: Optional[EventRecordsFilter] = None,
limit: Optional[int] = None,
ascending: bool = False,
) -> Iterable[EventLogRecord]:
# type ignored because `get_event_records` does not accept None. Unclear which type
# annotation is wrong.
return self._storage.event_log_storage.get_event_records(
event_records_filter, # type: ignore
limit,
ascending,
)
def fetch_materializations( # pyright: ignore[reportIncompatibleMethodOverride]
self,
filters: Union[AssetKey, "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> EventRecordsResult:
return self._storage.event_log_storage.fetch_materializations(
filters, limit, cursor, ascending
)
def fetch_failed_materializations(
self,
records_filter: Union[AssetKey, "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> EventRecordsResult:
return self._storage.event_log_storage.fetch_failed_materializations(
records_filter, limit, cursor, ascending
)
def fetch_observations( # pyright: ignore[reportIncompatibleMethodOverride]
self,
filters: Union[AssetKey, "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> EventRecordsResult:
return self._storage.event_log_storage.fetch_observations(filters, limit, cursor, ascending)
def fetch_run_status_changes( # pyright: ignore[reportIncompatibleMethodOverride]
self,
filters: Union["DagsterEventType", "RunStatusChangeRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> EventRecordsResult:
return self._storage.event_log_storage.fetch_run_status_changes(
filters, limit, cursor, ascending
)
def get_latest_planned_materialization_info(
self,
asset_key: AssetKey,
partition: Optional[str] = None,
) -> Optional[PlannedMaterializationInfo]:
return self._storage.event_log_storage.get_latest_planned_materialization_info(
asset_key, partition
)
def get_updated_data_version_partitions(
self, asset_key: AssetKey, partitions: Iterable[str], since_storage_id: int
) -> set[str]:
return self._storage.event_log_storage.get_updated_data_version_partitions(
asset_key, partitions, since_storage_id
)
def get_asset_records( # pyright: ignore[reportIncompatibleMethodOverride]
self, asset_keys: Optional[Sequence["AssetKey"]] = None
) -> Iterable[AssetRecord]:
return self._storage.event_log_storage.get_asset_records(asset_keys)
def get_freshness_state_records(
self, keys: Sequence["AssetKey"]
) -> Mapping["AssetKey", FreshnessStateRecord]:
return self._storage.event_log_storage.get_freshness_state_records(keys)
def get_asset_check_summary_records(
self, asset_check_keys: Sequence["AssetCheckKey"]
) -> Mapping["AssetCheckKey", AssetCheckSummaryRecord]:
return self._storage.event_log_storage.get_asset_check_summary_records(asset_check_keys)
def has_asset_key(self, asset_key: "AssetKey") -> bool:
return self._storage.event_log_storage.has_asset_key(asset_key)
def all_asset_keys(self) -> Iterable["AssetKey"]: # pyright: ignore[reportIncompatibleMethodOverride]
return self._storage.event_log_storage.all_asset_keys()
def get_asset_keys( # pyright: ignore[reportIncompatibleMethodOverride]
self,
prefix: Optional[Sequence[str]] = None,
limit: Optional[int] = None,
cursor: Optional[str] = None,
) -> Iterable["AssetKey"]:
return self._storage.event_log_storage.get_asset_keys(prefix, limit, cursor)
def get_latest_materialization_events(
self, asset_keys: Iterable["AssetKey"]
) -> Mapping["AssetKey", Optional["EventLogEntry"]]:
return self._storage.event_log_storage.get_latest_materialization_events(asset_keys)
def wipe_asset(self, asset_key: "AssetKey") -> None:
return self._storage.event_log_storage.wipe_asset(asset_key)
def wipe_asset_partitions(self, asset_key: AssetKey, partition_keys: Sequence[str]) -> None:
"""Remove asset index history from event log for given asset partitions."""
raise NotImplementedError(
"Partitioned asset wipe is not supported yet for this event log storage."
)
def get_materialized_partitions(
self,
asset_key: AssetKey,
before_cursor: Optional[int] = None,
after_cursor: Optional[int] = None,
) -> set[str]:
return self._storage.event_log_storage.get_materialized_partitions(
asset_key, before_cursor, after_cursor
)
def get_latest_storage_id_by_partition(
self,
asset_key: "AssetKey",
event_type: "DagsterEventType",
partitions: Optional[set[str]] = None,
) -> Mapping[str, int]:
return self._storage.event_log_storage.get_latest_storage_id_by_partition(
asset_key, event_type, partitions
)
def get_latest_tags_by_partition(
self,
asset_key: "AssetKey",
event_type: "DagsterEventType",
tag_keys: Sequence[str],
asset_partitions: Optional[Sequence[str]] = None,
before_cursor: Optional[int] = None,
after_cursor: Optional[int] = None,
) -> Mapping[str, Mapping[str, str]]:
return self._storage.event_log_storage.get_latest_tags_by_partition(
asset_key, event_type, tag_keys, asset_partitions, before_cursor, after_cursor
)
def get_latest_asset_partition_materialization_attempts_without_materializations(
self, asset_key: "AssetKey", after_storage_id: Optional[int] = None
) -> Mapping[str, tuple[str, int]]:
return self._storage.event_log_storage.get_latest_asset_partition_materialization_attempts_without_materializations(
asset_key, after_storage_id
)
def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:
return self._storage.event_log_storage.get_dynamic_partitions(partitions_def_name)
def get_paginated_dynamic_partitions(
self, partitions_def_name: str, limit: int, ascending: bool, cursor: Optional[str] = None
) -> PaginatedResults[str]:
return self._storage.event_log_storage.get_paginated_dynamic_partitions(
partitions_def_name=partitions_def_name, limit=limit, ascending=ascending, cursor=cursor
)
def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:
return self._storage.event_log_storage.has_dynamic_partition(
partitions_def_name, partition_key
)
def add_dynamic_partitions(
self, partitions_def_name: str, partition_keys: Sequence[str]
) -> None:
return self._storage.event_log_storage.add_dynamic_partitions(
partitions_def_name, partition_keys
)
def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:
return self._storage.event_log_storage.delete_dynamic_partition(
partitions_def_name, partition_key
)
def get_event_tags_for_asset(
self,
asset_key: "AssetKey",
filter_tags: Optional[Mapping[str, str]] = None,
filter_event_id: Optional[int] = None,
) -> Sequence[Mapping[str, str]]:
return self._storage.event_log_storage.get_event_tags_for_asset(
asset_key, filter_tags, filter_event_id
)
def can_read_asset_status_cache(self) -> bool:
return self._storage.event_log_storage.can_read_asset_status_cache()
def can_write_asset_status_cache(self) -> bool:
return self._storage.event_log_storage.can_write_asset_status_cache()
def wipe_asset_cached_status(self, asset_key: "AssetKey") -> None:
return self._storage.event_log_storage.wipe_asset_cached_status(asset_key)
def update_asset_cached_status_data(
self, asset_key: "AssetKey", cache_values: "AssetStatusCacheValue"
) -> None:
self._storage.event_log_storage.update_asset_cached_status_data(
asset_key=asset_key, cache_values=cache_values
)
def get_records_for_run(
self,
run_id: str,
cursor: Optional[str] = None,
of_type: Optional[Union["DagsterEventType", set["DagsterEventType"]]] = None,
limit: Optional[int] = None,
ascending: bool = True,
) -> EventLogConnection:
return self._storage.event_log_storage.get_records_for_run(
run_id, cursor, of_type, limit, ascending
)
def initialize_concurrency_limit_to_default(self, concurrency_key: str) -> bool:
return self._storage.event_log_storage.initialize_concurrency_limit_to_default(
concurrency_key
)
def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:
return self._storage.event_log_storage.set_concurrency_slots(concurrency_key, num)
def delete_concurrency_limit(self, concurrency_key: str) -> None:
return self._storage.event_log_storage.delete_concurrency_limit(concurrency_key)
def get_concurrency_keys(self) -> set[str]:
return self._storage.event_log_storage.get_concurrency_keys()
def get_pool_limits(self) -> Sequence[PoolLimit]:
return self._storage.event_log_storage.get_pool_limits()
def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:
return self._storage.event_log_storage.get_concurrency_info(concurrency_key)
def claim_concurrency_slot(
self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None
) -> ConcurrencyClaimStatus:
return self._storage.event_log_storage.claim_concurrency_slot(
concurrency_key, run_id, step_key, priority
)
def check_concurrency_claim(self, concurrency_key: str, run_id: str, step_key: str):
return self._storage.event_log_storage.check_concurrency_claim(
concurrency_key, run_id, step_key
)
def get_concurrency_run_ids(self) -> set[str]:
return self._storage.event_log_storage.get_concurrency_run_ids()
def free_concurrency_slots_for_run(self, run_id: str) -> None:
return self._storage.event_log_storage.free_concurrency_slots_for_run(run_id)
def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:
return self._storage.event_log_storage.free_concurrency_slot_for_step(run_id, step_key)
def get_asset_check_execution_history(
self,
check_key: "AssetCheckKey",
limit: int,
cursor: Optional[int] = None,
status: Optional[AbstractSet[AssetCheckExecutionRecordStatus]] = None,
) -> Sequence[AssetCheckExecutionRecord]:
return self._storage.event_log_storage.get_asset_check_execution_history(
check_key=check_key,
limit=limit,
cursor=cursor,
status=status,
)
def get_latest_asset_check_execution_by_key( # pyright: ignore[reportIncompatibleMethodOverride]
self,
check_keys: Sequence["AssetCheckKey"],
) -> Mapping["AssetCheckKey", Optional[AssetCheckExecutionRecord]]:
return self._storage.event_log_storage.get_latest_asset_check_execution_by_key(check_keys)
| LegacyEventLogStorage |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 12573,
"end": 13168
} | class ____(graphene.InputObjectType):
parentRunId = graphene.NonNull(graphene.String)
strategy = graphene.NonNull(GrapheneReexecutionStrategy)
extraTags = graphene.List(
graphene.NonNull(GrapheneExecutionTag),
description="""When re-executing a single run, pass new tags which will upsert over tags on the parent run.""",
)
useParentRunTags = graphene.Boolean(
description="""When re-executing a single run, pass false to avoid adding the parent run tags by default."""
)
class Meta:
name = "ReexecutionParams"
| GrapheneReexecutionParams |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 69382,
"end": 70878
} | class ____(Constraint):
"""
For clients: no explicit constraint; constraint is whatever is implicitly
inferred by guards from tracing.
For backends: there must exist at least TWO possible values for the
size at this dimension which satisfy the guards for this dimension.
In other words, this constraint helps us distinguish between "we don't
care if this dimension specializes or not" versus "this dimension must be
unspecialized." However, this constraint doesn't say very much about what
specialization is permitted; for example, if we guard on a size being
even, this would still be acceptable under an unspec constraint. This
makes RelaxedUnspecConstraint useful for eager mode, where your backend compiler
may add constraints to otherwise dynamic dimensions; we can't assert that
there are NO guards as this is brittle because compilers should be able to
add extra constraints. If you want to assert that there are no guards,
use StrictMinMaxConstraint with an unbounded ValueRanges.
"""
def render(self, source: Source) -> str:
return f"RelaxedUnspecConstraint({source.name()})"
# NB: None here indicates the client constraint is whatever is implicitly
# inferred by guards from tracing, and that a backend can add whatever guards
# it wants (including fully specializing the value).
DimConstraint = Union[StrictMinMaxConstraint, RelaxedUnspecConstraint, None]
@dataclass(frozen=True)
| RelaxedUnspecConstraint |
python | scipy__scipy | scipy/sparse/tests/test_sputils.py | {
"start": 294,
"end": 16486
} | class ____:
def test_upcast(self):
assert_equal(sputils.upcast('intc'), np.intc)
assert_equal(sputils.upcast('int32', 'float32'), np.float64)
assert_equal(sputils.upcast('bool', complex, float), np.complex128)
assert_equal(sputils.upcast('i', 'd'), np.float64)
def test_getdtype(self):
A = np.array([1], dtype='int8')
assert_equal(sputils.getdtype(None, default=float), float)
assert_equal(sputils.getdtype(None, a=A), np.int8)
with assert_raises(
ValueError,
match="scipy.sparse does not support dtype object. .*",
):
sputils.getdtype("O")
with assert_raises(
ValueError,
match="scipy.sparse does not support dtype float16. .*",
):
sputils.getdtype(None, default=np.float16)
def test_isscalarlike(self):
assert_equal(sputils.isscalarlike(3.0), True)
assert_equal(sputils.isscalarlike(-4), True)
assert_equal(sputils.isscalarlike(2.5), True)
assert_equal(sputils.isscalarlike(1 + 3j), True)
assert_equal(sputils.isscalarlike(np.array(3)), True)
assert_equal(sputils.isscalarlike("16"), True)
assert_equal(sputils.isscalarlike(np.array([3])), False)
assert_equal(sputils.isscalarlike([[3]]), False)
assert_equal(sputils.isscalarlike((1,)), False)
assert_equal(sputils.isscalarlike((1, 2)), False)
def test_isintlike(self):
assert_equal(sputils.isintlike(-4), True)
assert_equal(sputils.isintlike(np.array(3)), True)
assert_equal(sputils.isintlike(np.array([3])), False)
with assert_raises(
ValueError,
match="Inexact indices into sparse matrices are not allowed"
):
sputils.isintlike(3.0)
assert_equal(sputils.isintlike(2.5), False)
assert_equal(sputils.isintlike(1 + 3j), False)
assert_equal(sputils.isintlike((1,)), False)
assert_equal(sputils.isintlike((1, 2)), False)
def test_isshape(self):
assert_equal(sputils.isshape((1, 2)), True)
assert_equal(sputils.isshape((5, 2)), True)
assert_equal(sputils.isshape((1.5, 2)), False)
assert_equal(sputils.isshape((2, 2, 2)), False)
assert_equal(sputils.isshape(([2], 2)), False)
assert_equal(sputils.isshape((-1, 2), nonneg=False),True)
assert_equal(sputils.isshape((2, -1), nonneg=False),True)
assert_equal(sputils.isshape((-1, 2), nonneg=True),False)
assert_equal(sputils.isshape((2, -1), nonneg=True),False)
assert_equal(sputils.isshape((1.5, 2), allow_nd=(1, 2)), False)
assert_equal(sputils.isshape(([2], 2), allow_nd=(1, 2)), False)
assert_equal(sputils.isshape((2, 2, -2), nonneg=True, allow_nd=(1, 2)),
False)
assert_equal(sputils.isshape((2,), allow_nd=(1, 2)), True)
assert_equal(sputils.isshape((2, 2,), allow_nd=(1, 2)), True)
assert_equal(sputils.isshape((2, 2, 2), allow_nd=(1, 2)), False)
def test_issequence(self):
assert_equal(sputils.issequence((1,)), True)
assert_equal(sputils.issequence((1, 2, 3)), True)
assert_equal(sputils.issequence([1]), True)
assert_equal(sputils.issequence([1, 2, 3]), True)
assert_equal(sputils.issequence(np.array([1, 2, 3])), True)
assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False)
assert_equal(sputils.issequence(3), False)
def test_ismatrix(self):
assert_equal(sputils.ismatrix(((),)), True)
assert_equal(sputils.ismatrix([[1], [2]]), True)
assert_equal(sputils.ismatrix(np.arange(3)[None]), True)
assert_equal(sputils.ismatrix([1, 2]), False)
assert_equal(sputils.ismatrix(np.arange(3)), False)
assert_equal(sputils.ismatrix([[[1]]]), False)
assert_equal(sputils.ismatrix(3), False)
def test_isdense(self):
assert_equal(sputils.isdense(np.array([1])), True)
assert_equal(sputils.isdense(matrix([1])), True)
def test_validateaxis(self):
with assert_raises(ValueError, match="does not accept 0D axis"):
sputils.validateaxis(())
for ax in [1.5, (0, 1.5), (1.5, 0)]:
with assert_raises(TypeError, match="must be an integer"):
sputils.validateaxis(ax)
for ax in [(1, 1), (1, -1), (0, -2)]:
with assert_raises(ValueError, match="duplicate value in axis"):
sputils.validateaxis(ax)
# ndim 1
for ax in [1, -2, (0, 1), (1, -1)]:
with assert_raises(ValueError, match="out of range"):
sputils.validateaxis(ax, ndim=1)
with assert_raises(ValueError, match="duplicate value in axis"):
sputils.validateaxis((0, -1), ndim=1)
# all valid axis values lead to None when canonical
for axis in (0, -1, None, (0,), (-1,)):
assert sputils.validateaxis(axis, ndim=1) is None
# ndim 2
for ax in [5, -5, (0, 5), (-5, 0)]:
with assert_raises(ValueError, match="out of range"):
sputils.validateaxis(ax, ndim=2)
for axis in ((0,), (1,), None):
assert sputils.validateaxis(axis, ndim=2) == axis
axis_2d = {-2: (0,), -1: (1,), 0: (0,), 1: (1,), (0, 1): None, (0, -1): None}
for axis, canonical_axis in axis_2d.items():
assert sputils.validateaxis(axis, ndim=2) == canonical_axis
# ndim 4
for axis in ((2,), (3,), (2, 3), (2, 1), (0, 3)):
assert sputils.validateaxis(axis, ndim=4) == axis
axis_4d = {-4: (0,), -3: (1,), 2: (2,), 3: (3,), (3, -4): (3, 0)}
for axis, canonical_axis in axis_4d.items():
sputils.validateaxis(axis, ndim=4) == canonical_axis
@pytest.mark.parametrize("container", [csr_array, bsr_array])
def test_safely_cast_index_compressed(self, container):
# This is slow to test completely as nnz > imax is big
# and indptr is big for some shapes
# So we don't test large nnz, nor csc_array (same code as csr_array)
imax = np.int64(np.iinfo(np.int32).max)
# Shape 32bit
A32 = container((1, imax))
# indices big type, small values
B32 = A32.copy()
B32.indices = B32.indices.astype(np.int64)
B32.indptr = B32.indptr.astype(np.int64)
# Shape 64bit
# indices big type, small values
A64 = csr_array((1, imax + 1))
# indices small type, small values
B64 = A64.copy()
B64.indices = B64.indices.astype(np.int32)
B64.indptr = B64.indptr.astype(np.int32)
# indices big type, big values
C64 = A64.copy()
C64.indices = np.array([imax + 1], dtype=np.int64)
C64.indptr = np.array([0, 1], dtype=np.int64)
C64.data = np.array([2.2])
assert (A32.indices.dtype, A32.indptr.dtype) == (np.int32, np.int32)
assert (B32.indices.dtype, B32.indptr.dtype) == (np.int64, np.int64)
assert (A64.indices.dtype, A64.indptr.dtype) == (np.int64, np.int64)
assert (B64.indices.dtype, B64.indptr.dtype) == (np.int32, np.int32)
assert (C64.indices.dtype, C64.indptr.dtype) == (np.int64, np.int64)
for A in [A32, B32, A64, B64]:
indices, indptr = sputils.safely_cast_index_arrays(A, np.int32)
assert (indices.dtype, indptr.dtype) == (np.int32, np.int32)
indices, indptr = sputils.safely_cast_index_arrays(A, np.int64)
assert (indices.dtype, indptr.dtype) == (np.int64, np.int64)
indices, indptr = sputils.safely_cast_index_arrays(A, A.indices.dtype)
assert indices is A.indices
assert indptr is A.indptr
with assert_raises(ValueError):
sputils.safely_cast_index_arrays(C64, np.int32)
indices, indptr = sputils.safely_cast_index_arrays(C64, np.int64)
assert indices is C64.indices
assert indptr is C64.indptr
def test_safely_cast_index_coo(self):
# This is slow to test completely as nnz > imax is big
# So we don't test large nnz
imax = np.int64(np.iinfo(np.int32).max)
# Shape 32bit
A32 = coo_array((1, imax))
# coords big type, small values
B32 = A32.copy()
B32.coords = tuple(co.astype(np.int64) for co in B32.coords)
# Shape 64bit
# coords big type, small values
A64 = coo_array((1, imax + 1))
# coords small type, small values
B64 = A64.copy()
B64.coords = tuple(co.astype(np.int32) for co in B64.coords)
# coords big type, big values
C64 = A64.copy()
C64.coords = (np.array([imax + 1]), np.array([0]))
C64.data = np.array([2.2])
assert A32.coords[0].dtype == np.int32
assert B32.coords[0].dtype == np.int64
assert A64.coords[0].dtype == np.int64
assert B64.coords[0].dtype == np.int32
assert C64.coords[0].dtype == np.int64
for A in [A32, B32, A64, B64]:
coords = sputils.safely_cast_index_arrays(A, np.int32)
assert coords[0].dtype == np.int32
coords = sputils.safely_cast_index_arrays(A, np.int64)
assert coords[0].dtype == np.int64
coords = sputils.safely_cast_index_arrays(A, A.coords[0].dtype)
assert coords[0] is A.coords[0]
with assert_raises(ValueError):
sputils.safely_cast_index_arrays(C64, np.int32)
coords = sputils.safely_cast_index_arrays(C64, np.int64)
assert coords[0] is C64.coords[0]
def test_safely_cast_index_dia(self):
# This is slow to test completely as nnz > imax is big
# So we don't test large nnz
imax = np.int64(np.iinfo(np.int32).max)
# Shape 32bit
A32 = dia_array((1, imax))
# offsets big type, small values
B32 = A32.copy()
B32.offsets = B32.offsets.astype(np.int64)
# Shape 64bit
# offsets big type, small values
A64 = dia_array((1, imax + 2))
# offsets small type, small values
B64 = A64.copy()
B64.offsets = B64.offsets.astype(np.int32)
# offsets big type, big values
C64 = A64.copy()
C64.offsets = np.array([imax + 1])
C64.data = np.array([2.2])
assert A32.offsets.dtype == np.int32
assert B32.offsets.dtype == np.int64
assert A64.offsets.dtype == np.int64
assert B64.offsets.dtype == np.int32
assert C64.offsets.dtype == np.int64
for A in [A32, B32, A64, B64]:
offsets = sputils.safely_cast_index_arrays(A, np.int32)
assert offsets.dtype == np.int32
offsets = sputils.safely_cast_index_arrays(A, np.int64)
assert offsets.dtype == np.int64
offsets = sputils.safely_cast_index_arrays(A, A.offsets.dtype)
assert offsets is A.offsets
with assert_raises(ValueError):
sputils.safely_cast_index_arrays(C64, np.int32)
offsets = sputils.safely_cast_index_arrays(C64, np.int64)
assert offsets is C64.offsets
def test_get_index_dtype(self):
imax = np.int64(np.iinfo(np.int32).max)
too_big = imax + 1
# Check that uint32's with no values too large doesn't return
# int64
a1 = np.ones(90, dtype='uint32')
a2 = np.ones(90, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int32')
)
# Check that if we can not convert but all values are less than or
# equal to max that we can just convert to int32
a1[-1] = imax
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int32')
)
# Check that if it can not convert directly and the contents are
# too large that we return int64
a1[-1] = too_big
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
np.dtype('int64')
)
# test that if can not convert and didn't specify to check_contents
# we return int64
a1 = np.ones(89, dtype='uint32')
a2 = np.ones(89, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2))),
np.dtype('int64')
)
# Check that even if we have arrays that can be converted directly
# that if we specify a maxval directly it takes precedence
a1 = np.ones(12, dtype='uint32')
a2 = np.ones(12, dtype='uint32')
assert_equal(
np.dtype(sputils.get_index_dtype(
(a1, a2), maxval=too_big, check_contents=True
)),
np.dtype('int64')
)
# Check that an array with a too max size and maxval set
# still returns int64
a1[-1] = too_big
assert_equal(
np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)),
np.dtype('int64')
)
# tests public broadcast_shapes largely from
# numpy/numpy/lib/tests/test_stride_tricks.py
# first 3 cause np.broadcast to raise index too large, but not sputils
@pytest.mark.parametrize("input_shapes,target_shape", [
[((6, 5, 1, 4, 1, 1), (1, 2**32), (2**32, 1)), (6, 5, 1, 4, 2**32, 2**32)],
[((6, 5, 1, 4, 1, 1), (1, 2**32)), (6, 5, 1, 4, 1, 2**32)],
[((1, 2**32), (2**32, 1)), (2**32, 2**32)],
[[2, 2, 2], (2,)],
[[], ()],
[[()], ()],
[[(7,)], (7,)],
[[(1, 2), (2,)], (1, 2)],
[[(2,), (1, 2)], (1, 2)],
[[(1, 1)], (1, 1)],
[[(1, 1), (3, 4)], (3, 4)],
[[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
[[(5, 6, 1)], (5, 6, 1)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
[[(1,), (3,)], (3,)],
[[2, (3, 2)], (3, 2)],
[[(1, 2)] * 32, (1, 2)],
[[(1, 2)] * 100, (1, 2)],
[[(2,)] * 32, (2,)],
])
def test_broadcast_shapes_successes(self, input_shapes, target_shape):
assert_equal(sputils.broadcast_shapes(*input_shapes), target_shape)
# tests public broadcast_shapes failures
@pytest.mark.parametrize("input_shapes", [
[(3,), (4,)],
[(2, 3), (2,)],
[2, (2, 3)],
[(3,), (3,), (4,)],
[(2, 5), (3, 5)],
[(2, 4), (2, 5)],
[(1, 3, 4), (2, 3, 3)],
[(1, 2), (3, 1), (3, 2), (10, 5)],
[(2,)] * 32 + [(3,)] * 32,
])
def test_broadcast_shapes_failures(self, input_shapes):
with assert_raises(ValueError, match="cannot be broadcast"):
sputils.broadcast_shapes(*input_shapes)
def test_check_shape_overflow(self):
new_shape = sputils.check_shape([(10, -1)], (65535, 131070))
assert_equal(new_shape, (10, 858967245))
def test_matrix(self):
a = [[1, 2, 3]]
b = np.array(a)
assert isinstance(sputils.matrix(a), np.matrix)
assert isinstance(sputils.matrix(b), np.matrix)
c = sputils.matrix(b)
c[:, :] = 123
assert_equal(b, a)
c = sputils.matrix(b, copy=False)
c[:, :] = 123
assert_equal(b, [[123, 123, 123]])
def test_asmatrix(self):
a = [[1, 2, 3]]
b = np.array(a)
assert isinstance(sputils.asmatrix(a), np.matrix)
assert isinstance(sputils.asmatrix(b), np.matrix)
c = sputils.asmatrix(b)
c[:, :] = 123
assert_equal(b, [[123, 123, 123]])
| TestSparseUtils |
python | huggingface__transformers | src/transformers/trainer_utils.py | {
"start": 5045,
"end": 6310
} | class ____:
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
inputs (`np.ndarray`, *optional*): Input data passed to the model.
losses (`np.ndarray`, *optional*): Loss values computed during evaluation.
"""
def __init__(
self,
predictions: np.ndarray | tuple[np.ndarray],
label_ids: np.ndarray | tuple[np.ndarray],
inputs: np.ndarray | tuple[np.ndarray] | None = None,
losses: np.ndarray | tuple[np.ndarray] | None = None,
):
self.predictions = predictions
self.label_ids = label_ids
self.inputs = inputs
self.losses = losses
self.elements = (self.predictions, self.label_ids)
if self.inputs is not None:
self.elements += (self.inputs,)
if self.losses is not None:
self.elements += (self.losses,)
def __iter__(self):
return iter(self.elements)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.elements):
raise IndexError("tuple index out of range")
return self.elements[idx]
| EvalPrediction |
python | qdrant__qdrant-client | qdrant_client/embed/embedder.py | {
"start": 480,
"end": 652
} | class ____(BaseModel, Generic[T], arbitrary_types_allowed=True): # type: ignore[call-arg]
model: T
options: dict[str, Any]
deprecated: bool = False
| ModelInstance |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 28380,
"end": 29456
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "test_complex"
additional_test_settings = {
"COMPRESS_OFFLINE_CONTEXT": {
"condition": "OK!",
# Django templating does not allow definition of tuples in the
# templates.
# Make sure this is same as test_templates_jinja2/test_complex.
"my_names": ("js/one.js", "js/nonasc.js"),
}
}
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
self.assertEqual(3, count)
self.assertEqual(
[
self._render_script("76a82cfab9ab"),
self._render_script("7219642b8ab4"),
self._render_script("567bb77b13db"),
],
result,
)
rendered_template = self._render_template(engine)
self.assertEqual(
rendered_template, self._render_result([result[0], result[2]], "")
)
| OfflineCompressComplexTestCase |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 15349,
"end": 16271
} | class ____(OptionallyRequired[_IpAddressValue]):
"""
IpAddress Config Option.
Validate that an IP address is in an appropriate format
"""
def run_validation(self, value: object) -> _IpAddressValue:
if not isinstance(value, str) or ':' not in value:
raise ValidationError("Must be a string of format 'IP:PORT'")
host, port_str = value.rsplit(':', 1)
if host != 'localhost':
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
try:
# Validate and normalize IP Address
host = str(ipaddress.ip_address(host))
except ValueError as e:
raise ValidationError(e)
try:
port = int(port_str)
except Exception:
raise ValidationError(f"'{port_str}' is not a valid port")
return _IpAddressValue(host, port)
| IpAddress |
python | pytorch__pytorch | torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py | {
"start": 561,
"end": 640
} | class ____(Enum):
REENTRANT = auto()
NO_REENTRANT = auto()
| CheckpointImpl |
python | django-extensions__django-extensions | tests/management/commands/test_show_permissions.py | {
"start": 148,
"end": 3989
} | class ____(TestCase):
def _run_command(self, *args, **kwargs):
"""
Utility to run the command and return captured output.
"""
out = StringIO()
sys_stdout = sys.stdout
sys.stdout = out
try:
call_command("show_permissions", *args, **kwargs)
finally:
sys.stdout = sys_stdout
return out.getvalue()
def _check_header_in_output(self, app_labels, model_verbose, output):
"""
Accepts a list of app label variants and checks if any of them exists with the model_verbose in the output.
"""
for app_label in app_labels:
header = f"Permissions for {app_label} | {model_verbose}"
if header in output:
return
raise AssertionError(
f"None of the expected headers found in output. Tried: {[f'Permissions for {label} | {model_verbose}' for label in app_labels]}"
)
def test_should_list_permissions_for_all_apps_excluding_defaults(self):
output = self._run_command(verbosity=3)
auth_verbose = apps.get_app_config("auth").verbose_name
user_verbose = apps.get_model("auth", "user")._meta.verbose_name
self.assertNotIn(
f"Permissions for {auth_verbose} | {user_verbose}",
output,
"Should not list auth permissions without --all flag",
)
self.assertNotIn("auth.add_user", output)
def test_should_include_all_apps_with_flag(self):
output = self._run_command("--all", verbosity=3)
auth_config = apps.get_app_config("auth")
user_verbose = apps.get_model("auth", "user")._meta.verbose_name
self._check_header_in_output(
[auth_config.verbose_name, auth_config.label], user_verbose, output
)
self.assertIn("auth.add_user", output)
admin_config = apps.get_app_config("admin")
for model in admin_config.get_models():
model_verbose = model._meta.verbose_name
self._check_header_in_output(
[admin_config.verbose_name, admin_config.label], model_verbose, output
)
def test_should_filter_by_app_label(self):
output = self._run_command("--app-label", "auth", verbosity=3)
auth_config = apps.get_app_config("auth")
for model in auth_config.get_models():
model_verbose = model._meta.verbose_name
self._check_header_in_output(
[auth_config.verbose_name, auth_config.label], model_verbose, output
)
self.assertIn("auth.change_user", output)
def test_should_filter_by_app_and_model(self):
output = self._run_command("auth.user", verbosity=3)
auth_config = apps.get_app_config("auth")
user_verbose = apps.get_model("auth", "user")._meta.verbose_name
self._check_header_in_output(
[auth_config.verbose_name, auth_config.label], user_verbose, output
)
self.assertIn("auth.change_user", output)
def test_should_raise_error_for_invalid_model(self):
with self.assertRaisesMessage(
Exception, "Content type not found for 'fakeapp.nosuchmodel'"
):
self._run_command("fakeapp.nosuchmodel", verbosity=3)
def test_should_return_permissions_for_test_model(self):
if apps.is_installed("tests"):
output = self._run_command("tests.samplemodel", verbosity=3)
self.assertIn("tests.samplemodel", output.lower())
self.assertIn("can add", output.lower())
def test_should_raise_error_for_invalid_app_label(self):
with self.assertRaisesMessage(
Exception, 'No content types found for app label "noapp".'
):
self._run_command("--app-label", "noapp", verbosity=3)
| ShowPermissionsTests |
python | django__django | tests/annotations/models.py | {
"start": 1632,
"end": 1922
} | class ____(models.Model):
name = models.CharField(max_length=200)
motto = models.CharField(max_length=200, null=True, blank=True)
ticker_name = models.CharField(max_length=10, null=True, blank=True)
description = models.CharField(max_length=200, null=True, blank=True)
| Company |
python | ansible__ansible | test/units/parsing/vault/test_vault.py | {
"start": 15135,
"end": 16615
} | class ____(unittest.TestCase):
def test_file(self):
password = 'some password'
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(to_bytes(password))
tmp_file.close()
fake_loader = DictDataLoader({tmp_file.name: 'sdfadf'})
secret = vault.get_file_vault_secret(filename=tmp_file.name, loader=fake_loader)
secret.load()
os.unlink(tmp_file.name)
self.assertEqual(secret.bytes, to_bytes(password))
def test_file_not_a_directory(self):
filename = '/dev/null/foobar'
fake_loader = DictDataLoader({filename: 'sdfadf'})
self.assertRaisesRegex(errors.AnsibleError,
'.*The vault password file %s was not found.*' % filename,
vault.get_file_vault_secret,
filename=filename,
loader=fake_loader)
def test_file_not_found(self):
tmp_file = tempfile.NamedTemporaryFile()
filename = os.path.realpath(tmp_file.name)
tmp_file.close()
fake_loader = DictDataLoader({filename: 'sdfadf'})
self.assertRaisesRegex(errors.AnsibleError,
'.*The vault password file %s was not found.*' % filename,
vault.get_file_vault_secret,
filename=filename,
loader=fake_loader)
| TestGetFileVaultSecret |
python | cherrypy__cherrypy | cherrypy/_cpnative_server.py | {
"start": 264,
"end": 4661
} | class ____(cheroot.server.Gateway):
"""Native gateway implementation allowing to bypass WSGI."""
recursive = False
def respond(self):
"""Obtain response from CherryPy machinery and then send it."""
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr # FIXME: handle UNIX sockets
local = tonative(local[0]), local[1]
local = httputil.Host(local[0], local[1], '')
remote = tonative(req.conn.remote_addr), req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], '')
scheme = tonative(req.scheme)
sn = cherrypy.tree.script_name(tonative(req.uri or '/'))
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = tonative(req.method)
path = tonative(req.path)
qs = tonative(req.qs or '')
headers = (
(tonative(h), tonative(v))
for h, v in req.inheaders.items()
)
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local,
remote,
scheme,
'HTTP/1.1',
)
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the
# response
try:
request.run(
method,
path,
qs,
tonative(req.request_protocol),
headers,
rfile,
)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same '
'URL twice: %r' % ir.path,
)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
self.send_response(
response.output_status,
response.header_list,
response.body,
)
finally:
app.release_serving()
except Exception:
tb = format_exc()
# print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
"""Send response to HTTP request."""
req = self.req
# Set response status
req.status = status or b'500 Server Error'
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if req.ready and not req.sent_headers:
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
| NativeGateway |
python | great-expectations__great_expectations | great_expectations/expectations/expectation.py | {
"start": 73343,
"end": 80079
} | class ____(Expectation, ABC):
"""Base class for BatchExpectations.
BatchExpectations answer a semantic question about a Batch of data.
For example, `expect_table_column_count_to_equal` and `expect_table_row_count_to_equal` answer
how many columns and rows are in your table.
BatchExpectations must implement a `_validate(...)` method containing logic
for determining whether the Expectation is successfully validated.
Raises:
InvalidExpectationConfigurationError: The configuration does not contain the values required by the Expectation.
Args:
domain_keys (tuple): A tuple of the keys used to determine the domain of the
expectation.
""" # noqa: E501 # FIXME CoP
batch_id: Union[str, None] = None
domain_keys: ClassVar[Tuple[str, ...]] = ("batch_id",)
metric_dependencies: ClassVar[Tuple[str, ...]] = ()
domain_type: ClassVar[MetricDomainTypes] = MetricDomainTypes.TABLE
args_keys: ClassVar[Tuple[str, ...]] = ()
@pydantic.validator("row_condition", check_fields=False)
def _validate_row_condition(cls, v):
"""Validate row_condition according to GX Cloud UI constraints.
This validator applies to all subclasses that define a row_condition field.
check_fields=False allows this to work even though row_condition is not
defined on BatchExpectation itself.
"""
return validate_row_condition(v)
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[BatchExpectation]) -> None:
Expectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"domain_type": {
"title": "Domain Type",
"type": "string",
"const": model.domain_type,
"description": "Batch",
}
}
)
@override
def get_validation_dependencies(
self,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) -> ValidationDependencies:
validation_dependencies: ValidationDependencies = super().get_validation_dependencies(
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
metric_name: str
for metric_name in self.metric_dependencies:
metric_kwargs = get_metric_kwargs(
metric_name=metric_name,
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=metric_name,
metric_configuration=MetricConfiguration(
metric_name=metric_name,
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
return validation_dependencies
def _validate_metric_value_between( # noqa: C901, PLR0912 # FIXME CoP
self,
metric_name,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
) -> Dict[str, Union[bool, Dict[str, Any]]]:
metric_value: Optional[Any] = metrics.get(metric_name)
if metric_value is None:
return {"success": False, "result": {"observed_value": metric_value}}
# Obtaining components needed for validation
success_kwargs = self._get_success_kwargs()
min_value: Optional[Any] = success_kwargs.get("min_value")
strict_min: Optional[bool] = success_kwargs.get("strict_min")
max_value: Optional[Any] = success_kwargs.get("max_value")
strict_max: Optional[bool] = success_kwargs.get("strict_max")
if not isinstance(metric_value, datetime.datetime) and pd.isnull(metric_value):
return {"success": False, "result": {"observed_value": None}}
if isinstance(metric_value, datetime.datetime):
if isinstance(min_value, str):
try:
min_value = parse(min_value)
except TypeError:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"""Could not parse "min_value" of {min_value} (of type "{type(min_value)!s}) into datetime \
representation.""" # noqa: E501 # FIXME CoP
)
if isinstance(max_value, str):
try:
max_value = parse(max_value)
except TypeError:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"""Could not parse "max_value" of {max_value} (of type "{type(max_value)!s}) into datetime \
representation.""" # noqa: E501 # FIXME CoP
)
if isinstance(min_value, datetime.datetime) or isinstance(max_value, datetime.datetime):
if not isinstance(metric_value, datetime.datetime):
try:
metric_value = parse(metric_value)
except TypeError:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"""Could not parse "metric_value" of {metric_value} (of type "{type(metric_value)!s}) into datetime \
representation.""" # noqa: E501 # FIXME CoP
)
if isinstance(min_value, datetime.date) or isinstance(max_value, datetime.date):
if not isinstance(metric_value, datetime.date):
try:
metric_value = parse(metric_value).date()
except TypeError:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"""Could not parse "metric_value" of {metric_value} (of type "{type(metric_value)!s}) into datetime \
representation.""" # noqa: E501 # FIXME CoP
)
# Checking if mean lies between thresholds
if min_value is not None:
if strict_min:
above_min = metric_value > min_value
else:
above_min = metric_value >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = metric_value < max_value
else:
below_max = metric_value <= max_value
else:
below_max = True
success = bool(above_min and below_max)
return {"success": success, "result": {"observed_value": metric_value}}
| BatchExpectation |
python | pydata__xarray | xarray/backends/netCDF4_.py | {
"start": 11995,
"end": 12141
} | class ____:
"""Pickleable equivalent of `lambda: value`."""
value: Any
def __call__(self):
return self.value
@dataclass
| _Thunk |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 978333,
"end": 987984
} | class ____(FieldChannelMixin, core.SecondaryFieldDef):
r"""
X2 schema wrapper.
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x2"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> X2: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> X2: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> X2: ...
@overload
def bandPosition(self, _: float, /) -> X2: ...
@overload
def bin(self, _: None, /) -> X2: ...
@overload
def field(self, _: str | RepeatRef, /) -> X2: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> X2: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> X2: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> X2: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> X2: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
**kwds,
)
@with_property_setters
| X2 |
python | tornadoweb__tornado | demos/file_upload/file_receiver.py | {
"start": 407,
"end": 872
} | class ____(tornado.web.RequestHandler):
def post(self):
for field_name, files in self.request.files.items():
for info in files:
filename, content_type = info["filename"], info["content_type"]
body = info["body"]
logging.info(
'POST "%s" "%s" %d bytes', filename, content_type, len(body)
)
self.write("OK")
@tornado.web.stream_request_body
| POSTHandler |
python | plotly__plotly.py | plotly/graph_objs/isosurface/slices/_z.py | {
"start": 233,
"end": 5303
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.slices"
_path_str = "isosurface.slices.z"
_valid_props = {"fill", "locations", "locationssrc", "show"}
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the `slices` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
specified slices would be created for all points of the axis z
except start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
@property
def show(self):
"""
Determines whether or not slice planes about the z dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis z except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the z
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs,
):
"""
Construct a new Z object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.slices.Z`
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis z except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the z
dimension are drawn.
Returns
-------
Z
"""
super().__init__("z")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.slices.Z
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.slices.Z`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fill", arg, fill)
self._set_property("locations", arg, locations)
self._set_property("locationssrc", arg, locationssrc)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Z |
python | kamyu104__LeetCode-Solutions | Python/unique-binary-search-trees-ii.py | {
"start": 755,
"end": 1385
} | class ____(object):
# @return a list of tree node
def generateTrees(self, n):
return self.generateTreesRecu(1, n)
def generateTreesRecu(self, low, high):
result = []
if low > high:
result.append(None)
for i in xrange(low, high + 1):
left = self.generateTreesRecu(low, i - 1)
right = self.generateTreesRecu(i + 1, high)
for j in left:
for k in right:
cur = TreeNode(i)
cur.left = j
cur.right = k
result.append(cur)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/shieldgemma2/configuration_shieldgemma2.py | {
"start": 806,
"end": 4805
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ShieldGemma2ForImageClassification`]. It is used to instantiate an
ShieldGemma2ForImageClassification according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the shieldgemma-2-4b-it.
e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[ShieldGemma2TextConfig, dict]`, *optional*):
The config object of the text backbone.
vision_config (`Union[AutoConfig, dict]`, *optional*):
Custom vision config or dict.
mm_tokens_per_image (`int`, *optional*, defaults to 256):
The number of tokens per image embedding.
boi_token_index (`int`, *optional*, defaults to 255999):
The begin-of-image token index to wrap the image prompt.
eoi_token_index (`int`, *optional*, defaults to 256000):
The end-of-image token index to wrap the image prompt.
image_token_index (`int`, *optional*, defaults to 262144):
The image token index to encode the image prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import ShieldGemma2ForConditionalGeneration, ShieldGemma2Config, SiglipVisionConfig, ShieldGemma2TextConfig
>>> # Initializing a Siglip-like vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a ShieldGemma2 Text config
>>> text_config = ShieldGemma2TextConfig()
>>> # Initializing a ShieldGemma2 gemma-3-4b style configuration
>>> configuration = ShieldGemma2Config(vision_config, text_config)
>>> # Initializing a model from the gemma-3-4b style configuration
>>> model = ShieldGemma2TextConfig(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "shieldgemma2"
attribute_map = {
"image_token_id": "image_token_index",
"boi_token_id": "boi_token_index",
"eoi_token_id": "eoi_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
text_config=None,
vision_config=None,
mm_tokens_per_image: int = 256,
boi_token_index: int = 255_999,
eoi_token_index: int = 256_000,
image_token_index: int = 262_144,
initializer_range: float = 0.02,
**kwargs,
):
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"]()
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "gemma3_text")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["gemma3_text"]()
self.text_config = text_config
self.vision_config = vision_config
self.mm_tokens_per_image = mm_tokens_per_image
self.boi_token_index = boi_token_index
self.eoi_token_index = eoi_token_index
self.image_token_index = image_token_index
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["ShieldGemma2Config"]
| ShieldGemma2Config |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/items/GLBoxItem.py | {
"start": 189,
"end": 2513
} | class ____(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays a wire-frame box.
"""
def __init__(self, size=None, color=None, glOptions='translucent', parentItem=None):
super().__init__()
self.lineplot = None # mark that we are still initializing
if size is None:
size = QtGui.QVector3D(1,1,1)
self.setSize(size=size)
if color is None:
color = (255,255,255,80)
self.setColor(color)
self.lineplot = GLLinePlotItem(
parentItem=self, glOptions=glOptions, mode='lines'
)
self.setParentItem(parentItem)
self.updateLines()
def setSize(self, x=None, y=None, z=None, size=None):
"""
Set the size of the box (in its local coordinate system; this does not affect the transform)
Arguments can be x,y,z or size=QVector3D().
"""
if size is not None:
x = size.x()
y = size.y()
z = size.z()
self.__size = [x,y,z]
self.updateLines()
def size(self):
return self.__size[:]
def setColor(self, *args):
"""Set the color of the box. Arguments are the same as those accepted by functions.mkColor()"""
self.__color = fn.mkColor(*args)
self.updateLines()
def color(self):
return self.__color
def updateLines(self):
if self.lineplot is None:
# still initializing
return
x,y,z = self.size()
pos = np.array([
[0, 0, 0],
[0, 0, z],
[x, 0, 0],
[x, 0, z],
[0, y, 0],
[0, y, z],
[x, y, 0],
[x, y, z],
[0, 0, 0],
[0, y, 0],
[x, 0, 0],
[x, y, 0],
[0, 0, z],
[0, y, z],
[x, 0, z],
[x, y, z],
[0, 0, 0],
[x, 0, 0],
[0, y, 0],
[x, y, 0],
[0, 0, z],
[x, 0, z],
[0, y, z],
[x, y, z],
], dtype=np.float32)
color = self.color().getRgbF()
self.lineplot.setData(pos=pos, color=color)
self.update()
| GLBoxItem |
python | pypa__warehouse | tests/unit/cache/origin/test_init.py | {
"start": 2198,
"end": 4721
} | class ____:
def test_no_cache_key(self):
response = pretend.stub()
@origin.origin_cache(1)
def view(context, request):
return response
def raiser(iface):
raise LookupError
context = pretend.stub()
request = pretend.stub(registry={"cache_keys": {}}, find_service=raiser)
assert view(context, request) is response
def test_no_origin_cache(self):
class Fake:
pass
response = pretend.stub()
@origin.origin_cache(1)
def view(context, request):
return response
@pretend.call_recorder
def raiser(iface):
raise LookupError
context = Fake()
request = pretend.stub(
registry={
"cache_keys": {Fake: lambda X: origin.CacheKeys(cache=[], purge=[])}
},
find_service=raiser,
)
assert view(context, request) is response
assert raiser.calls == [pretend.call(IOriginCache)]
@pytest.mark.parametrize(("seconds", "keys"), [(745, None), (823, ["nope", "yup"])])
def test_response_hook(self, seconds, keys):
class Fake:
pass
class Cache:
@staticmethod
@pretend.call_recorder
def cache(
keys, request, response, seconds, stale_while_revalidate, stale_if_error
):
pass
response = pretend.stub()
deco = origin.origin_cache(seconds, keys=keys)
@deco
def view(context, request):
return response
key_maker = pretend.call_recorder(
lambda obj: origin.CacheKeys(cache=["one", "two"], purge=[])
)
cacher = Cache()
context = Fake()
callbacks = []
request = pretend.stub(
registry={"cache_keys": {Fake: key_maker}},
find_service=lambda iface: cacher,
add_response_callback=callbacks.append,
)
assert view(context, request) is response
assert key_maker.calls == [pretend.call(context)]
assert len(callbacks) == 1
callbacks[0](request, response)
assert cacher.cache.calls == [
pretend.call(
["one", "two"] + ([] if keys is None else keys),
request,
response,
seconds=seconds,
stale_while_revalidate=None,
stale_if_error=None,
)
]
| TestOriginCache |
python | coleifer__peewee | bench.py | {
"start": 322,
"end": 370
} | class ____(Base):
name = TextField()
| Collection |
python | walkccc__LeetCode | solutions/2143. Choose Numbers From Two Arrays in Range/2143.py | {
"start": 0,
"end": 643
} | class ____:
def countSubranges(self, nums1: list[int], nums2: list[int]) -> int:
MOD = 1_000_000_007
ans = 0
# {sum, count}, add if choose from nums1, minus if choose from nums2
dp = collections.Counter()
for a, b in zip(nums1, nums2):
newDp = collections.Counter()
newDp[a] += 1
newDp[-b] += 1
for prevSum, count in dp.items():
# Choose nums1[i]
newDp[prevSum + a] += count
newDp[prevSum + a] %= MOD
# Choose nums2[i]
newDp[prevSum - b] += count
newDp[prevSum - b] %= MOD
dp = newDp
ans += dp[0]
ans %= MOD
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/number-of-longest-increasing-subsequence.py | {
"start": 31,
"end": 772
} | class ____(object):
def findNumberOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result, max_len = 0, 0
dp = [[1, 1] for _ in xrange(len(nums))] # {length, number} pair
for i in xrange(len(nums)):
for j in xrange(i):
if nums[i] > nums[j]:
if dp[i][0] == dp[j][0]+1:
dp[i][1] += dp[j][1]
elif dp[i][0] < dp[j][0]+1:
dp[i] = [dp[j][0]+1, dp[j][1]]
if max_len == dp[i][0]:
result += dp[i][1]
elif max_len < dp[i][0]:
max_len = dp[i][0]
result = dp[i][1]
return result
| Solution |
python | huggingface__transformers | src/transformers/models/plbart/modeling_plbart.py | {
"start": 2752,
"end": 3058
} | class ____(PreTrainedModel):
config: PLBartConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["PLBartDecoderLayer", "PLBartEncoderLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
| PLBartPreTrainedModel |
python | getsentry__sentry | src/sentry/overwatch_webhooks/webhook_forwarder.py | {
"start": 1229,
"end": 1610
} | class ____:
organization_integration: OrganizationIntegration
organization_mapping: OrganizationMapping
logger = logging.getLogger("sentry.overwatch_webhook_forwarder")
def verbose_log(msg: str, *, extra: dict | None = None) -> None:
if bool(options.get("overwatch.forward-webhooks.verbose", False)):
logger.info(msg, extra=extra)
| OverwatchOrganizationContext |
python | openai__openai-python | src/openai/types/beta/assistant_stream_event.py | {
"start": 4921,
"end": 5146
} | class ____(BaseModel):
data: Message
"""
Represents a message within a
[thread](https://platform.openai.com/docs/api-reference/threads).
"""
event: Literal["thread.message.created"]
| ThreadMessageCreated |
python | django__django | django/urls/resolvers.py | {
"start": 17436,
"end": 32046
} | class ____:
def __init__(
self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None
):
self.pattern = pattern
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
self._local = Local()
def __repr__(self):
if isinstance(self.urlconf_name, list) and self.urlconf_name:
# Don't bother to output the whole list, it can be huge
urlconf_repr = "<%s list>" % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return "<%s %s (%s:%s) %s>" % (
self.__class__.__name__,
urlconf_repr,
self.app_name,
self.namespace,
self.pattern.describe(),
)
def check(self):
messages = []
for pattern in self.url_patterns:
messages.extend(check_resolver(pattern))
return messages or self.pattern.check()
def _populate(self):
# Short-circuit if called recursively in this thread to prevent
# infinite recursion. Concurrent threads may call this at the same
# time and will need to continue, so set 'populating' on a
# thread-local variable.
if getattr(self._local, "populating", False):
return
try:
self._local.populating = True
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for url_pattern in reversed(self.url_patterns):
p_pattern = url_pattern.pattern.regex.pattern
p_pattern = p_pattern.removeprefix("^")
if isinstance(url_pattern, URLPattern):
self._callback_strs.add(url_pattern.lookup_str)
bits = normalize(url_pattern.pattern.regex.pattern)
lookups.appendlist(
url_pattern.callback,
(
bits,
p_pattern,
url_pattern.default_args,
url_pattern.pattern.converters,
),
)
if url_pattern.name is not None:
lookups.appendlist(
url_pattern.name,
(
bits,
p_pattern,
url_pattern.default_args,
url_pattern.pattern.converters,
),
)
else: # url_pattern is a URLResolver.
url_pattern._populate()
if url_pattern.app_name:
apps.setdefault(url_pattern.app_name, []).append(
url_pattern.namespace
)
namespaces[url_pattern.namespace] = (p_pattern, url_pattern)
else:
for name in url_pattern.reverse_dict:
for (
matches,
pat,
defaults,
converters,
) in url_pattern.reverse_dict.getlist(name):
new_matches = normalize(p_pattern + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
{**defaults, **url_pattern.default_kwargs},
{
**self.pattern.converters,
**url_pattern.pattern.converters,
**converters,
},
),
)
for namespace, (
prefix,
sub_pattern,
) in url_pattern.namespace_dict.items():
current_converters = url_pattern.pattern.converters
sub_pattern.pattern.converters.update(current_converters)
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in url_pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(url_pattern._callback_strs)
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._reverse_dict[language_code] = lookups
self._populated = True
finally:
self._local.populating = False
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
@staticmethod
def _extend_tried(tried, pattern, sub_tried=None):
if sub_tried is None:
tried.append([pattern])
else:
tried.extend([pattern, *t] for t in sub_tried)
@staticmethod
def _join_route(route1, route2):
"""Join two routes, without the starting ^ in the second route."""
if not route1:
return route2
route2 = route2.removeprefix("^")
return route1 + route2
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = str(path) # path may be a reverse_lazy object
tried = []
match = self.pattern.match(path)
if match:
new_path, args, kwargs = match
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
self._extend_tried(tried, pattern, e.args[0].get("tried"))
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = {**kwargs, **self.default_kwargs}
# Update the sub_match_dict with the kwargs from the
# sub_match.
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named
# groups. Otherwise, pass all non-named arguments as
# positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = args + sub_match.args
current_route = (
""
if isinstance(pattern, URLPattern)
else str(pattern.pattern)
)
self._extend_tried(tried, pattern, sub_match.tried)
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name, *sub_match.app_names],
[self.namespace, *sub_match.namespaces],
self._join_route(current_route, sub_match.route),
tried,
captured_kwargs=sub_match.captured_kwargs,
extra_kwargs={
**self.default_kwargs,
**sub_match.extra_kwargs,
},
)
tried.append([pattern])
raise Resolver404({"tried": tried, "path": new_path})
raise Resolver404({"path": path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, str):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError as e:
msg = (
"The included URLconf '{name}' does not appear to have "
"any patterns in it. If you see the 'urlpatterns' variable "
"with valid patterns in the file then the issue is probably "
"caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, "handler%s" % view_type, None)
if not callback:
# No handler specified in file; use lazy import, since
# django.conf.urls imports this file.
from django.conf import urls
callback = getattr(urls, "handler%s" % view_type)
return get_callable(callback)
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, "", *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
if not self._populated:
self._populate()
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults, converters in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, args))
else:
if set(kwargs).symmetric_difference(params).difference(defaults):
continue
matches = True
for k, v in defaults.items():
if k in params:
continue
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = kwargs
# Convert the candidate subs to text using Converter.to_url().
text_candidate_subs = {}
match = True
for k, v in candidate_subs.items():
if k in converters:
try:
text_candidate_subs[k] = converters[k].to_url(v)
except ValueError:
match = False
break
else:
text_candidate_subs[k] = str(v)
if not match:
continue
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace("%", "%%") + result
if re.search(
"^%s%s" % (re.escape(_prefix), pattern),
candidate_pat % text_candidate_subs,
):
# safe characters from `pchar` definition of RFC 3986
url = quote(
candidate_pat % text_candidate_subs,
safe=RFC3986_SUBDELIMS + "/~:@",
)
# Don't allow construction of scheme relative urls.
return escape_leading_slashes(url)
# lookup_view can be URL name or callable, but callables are not
# friendly in error messages.
m = getattr(lookup_view, "__module__", None)
n = getattr(lookup_view, "__name__", None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (_, pattern, _, _) in possibilities]
if patterns:
if args:
arg_msg = "arguments '%s'" % (args,)
elif kwargs:
arg_msg = "keyword arguments '%s'" % kwargs
else:
arg_msg = "no arguments"
msg = "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % (
lookup_view_s,
arg_msg,
len(patterns),
patterns,
)
else:
msg = (
"Reverse for '%(view)s' not found. '%(view)s' is not "
"a valid view function or pattern name." % {"view": lookup_view_s}
)
raise NoReverseMatch(msg)
| URLResolver |
python | pandas-dev__pandas | asv_bench/benchmarks/io/json.py | {
"start": 9004,
"end": 9705
} | class ____:
def setup_cache(self):
df = DataFrame([[1]])
df2 = DataFrame(range(8), date_range("1/1/2000", periods=8, freq="min"))
frames = {"int": df, "float": df.astype(float), "datetime": df2}
return frames
def peakmem_int(self, frames):
df = frames["int"]
for _ in range(100_000):
df.to_json()
def peakmem_float(self, frames):
df = frames["float"]
for _ in range(100_000):
df.to_json()
def peakmem_time(self, frames):
df = frames["datetime"]
for _ in range(10_000):
df.to_json(orient="table")
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| ToJSONMem |
python | ipython__ipython | tests/test_process.py | {
"start": 2669,
"end": 6091
} | class ____(tt.TempFileMixin):
def setUp(self):
"""Make a valid python temp file."""
lines = [
"import sys",
"print('on stdout', end='', file=sys.stdout)",
"print('on stderr', end='', file=sys.stderr)",
"sys.stdout.flush()",
"sys.stderr.flush()",
]
self.mktmp("\n".join(lines))
def test_system(self):
status = system(f'{python} "{self.fname}"')
self.assertEqual(status, 0)
def test_system_quotes(self):
status = system('%s -c "import sys"' % python)
self.assertEqual(status, 0)
def assert_interrupts(self, command):
"""
Interrupt a subprocess after a second.
"""
if threading.main_thread() != threading.current_thread():
raise pytest.skip("Can't run this test if not in main thread.")
# Some tests can overwrite SIGINT handler (by using pdb for example),
# which then breaks this test, so just make sure it's operating
# normally.
signal.signal(signal.SIGINT, signal.default_int_handler)
def interrupt():
# Wait for subprocess to start:
time.sleep(0.5)
interrupt_main()
threading.Thread(target=interrupt).start()
start = time.time()
try:
result = command()
except KeyboardInterrupt:
# Success!
pass
end = time.time()
self.assertTrue(
end - start < 2, "Process didn't die quickly: %s" % (end - start)
)
return result
def test_system_interrupt(self):
"""
When interrupted in the way ipykernel interrupts IPython, the
subprocess is interrupted.
"""
def command():
return system('%s -c "import time; time.sleep(5)"' % python)
status = self.assert_interrupts(command)
self.assertNotEqual(
status, 0, f"The process wasn't interrupted. Status: {status}"
)
def test_getoutput(self):
out = getoutput(f'{python} "{self.fname}"')
# we can't rely on the order the line buffered streams are flushed
try:
self.assertEqual(out, "on stderron stdout")
except AssertionError:
self.assertEqual(out, "on stdouton stderr")
def test_getoutput_quoted(self):
out = getoutput('%s -c "print (1)"' % python)
self.assertEqual(out.strip(), "1")
# Invalid quoting on windows
@dec.skip_win32
def test_getoutput_quoted2(self):
out = getoutput("%s -c 'print (1)'" % python)
self.assertEqual(out.strip(), "1")
out = getoutput("%s -c 'print (\"1\")'" % python)
self.assertEqual(out.strip(), "1")
def test_getoutput_error(self):
out, err = getoutputerror(f'{python} "{self.fname}"')
self.assertEqual(out, "on stdout")
self.assertEqual(err, "on stderr")
def test_get_output_error_code(self):
quiet_exit = '%s -c "import sys; sys.exit(1)"' % python
out, err, code = get_output_error_code(quiet_exit)
self.assertEqual(out, "")
self.assertEqual(err, "")
self.assertEqual(code, 1)
out, err, code = get_output_error_code(f'{python} "{self.fname}"')
self.assertEqual(out, "on stdout")
self.assertEqual(err, "on stderr")
self.assertEqual(code, 0)
| SubProcessTestCase |
python | walkccc__LeetCode | solutions/3174. Clear Digits/3174.py | {
"start": 0,
"end": 340
} | class ____:
def clearDigits(self, s: str) -> str:
ans = []
for c in s:
if c.isdigit():
# Since `ans` only contains non-digit characters, removing the last
# character is equivalent to deleting the closest non-digit character.
ans.pop()
else:
ans.append(c)
return ''.join(ans)
| Solution |
python | python-visualization__folium | folium/map.py | {
"start": 20432,
"end": 22064
} | class ____(MacroElement):
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
{{ this._parent.get_name() }}.fitBounds(
{{ this.bounds|tojson }},
{{ this.options|tojson }}
);
{% endmacro %}
"""
)
def __init__(
self,
bounds: TypeBounds,
padding_top_left: Optional[Sequence[float]] = None,
padding_bottom_right: Optional[Sequence[float]] = None,
padding: Optional[Sequence[float]] = None,
max_zoom: Optional[int] = None,
):
super().__init__()
self._name = "FitBounds"
self.bounds = bounds
self.options = parse_options(
max_zoom=max_zoom,
padding_top_left=padding_top_left,
padding_bottom_right=padding_bottom_right,
padding=padding,
)
| FitBounds |
python | django__django | tests/fixtures/models.py | {
"start": 1860,
"end": 1974
} | class ____(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
| PersonManager |
python | apache__airflow | providers/imap/tests/unit/imap/sensors/test_imap_attachment.py | {
"start": 959,
"end": 2076
} | class ____:
def setup_method(self):
self.kwargs = dict(
attachment_name="test_file",
check_regex=False,
mail_folder="INBOX",
mail_filter="All",
task_id="test_task",
dag=None,
)
@pytest.mark.parametrize("has_attachment_return_value", [True, False])
@patch("airflow.providers.imap.sensors.imap_attachment.ImapHook")
def test_poke(self, mock_imap_hook, has_attachment_return_value):
mock_imap_hook.return_value.__enter__ = Mock(return_value=mock_imap_hook)
mock_imap_hook.has_mail_attachment.return_value = has_attachment_return_value
has_attachment = ImapAttachmentSensor(**self.kwargs).poke(context={})
assert has_attachment == mock_imap_hook.has_mail_attachment.return_value
mock_imap_hook.has_mail_attachment.assert_called_once_with(
name=self.kwargs["attachment_name"],
check_regex=self.kwargs["check_regex"],
mail_folder=self.kwargs["mail_folder"],
mail_filter=self.kwargs["mail_filter"],
)
| TestImapAttachmentSensor |
python | walkccc__LeetCode | solutions/2876. Count Visited Nodes in a Directed Graph/2876.py | {
"start": 0,
"end": 1174
} | class ____:
def countVisitedNodes(self, edges: list[int]) -> list[int]:
n = len(edges)
ans = [0] * n
inDegrees = [0] * n
seen = [False] * n
stack = []
for v in edges:
inDegrees[v] += 1
# Perform topological sorting.
q = collections.deque([i for i, d in enumerate(inDegrees) if d == 0])
# Push non-cyclic nodes to stack.
while q:
u = q.popleft()
inDegrees[edges[u]] -= 1
if inDegrees[edges[u]] == 0:
q.append(edges[u])
stack.append(u)
seen[u] = True
# Fill the length of cyclic nodes.
for i in range(n):
if not seen[i]:
self._fillCycle(edges, i, seen, ans)
# Fill the length of non-cyclic nodes.
while stack:
u = stack.pop()
ans[u] = ans[edges[u]] + 1
return ans
def _fillCycle(
self,
edges: list[int],
start: int,
seen: list[bool],
ans: list[int],
) -> None:
cycleLength = 0
u = start
while not seen[u]:
cycleLength += 1
seen[u] = True
u = edges[u]
ans[start] = cycleLength
u = edges[start]
while u != start:
ans[u] = cycleLength
u = edges[u]
| Solution |
python | cython__cython | Cython/Tempita/_looper.py | {
"start": 1310,
"end": 3975
} | class ____:
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, str)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
| loop_pos |
python | neetcode-gh__leetcode | python/0904_fruit_into_baskets.py | {
"start": 0,
"end": 510
} | class ____:
def totalFruit(self, fruits: List[int]) -> int:
tr = {}
l = r = 0
res = 0
while r < len(fruits):
if fruits[r] not in tr:
tr[fruits[r]] = 1
else:
tr[fruits[r]] += 1
while len(tr) > 2:
tr[fruits[l]] -= 1
if tr[fruits[l]] == 0:
del tr[fruits[l]]
l += 1
res = max(res, r-l+1)
r += 1
return res
| Solution |
python | sqlalchemy__sqlalchemy | test/engine/test_ddlevents.py | {
"start": 18664,
"end": 27845
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
def setup_test(self):
self.engine = engines.mock_engine()
self.metadata = MetaData()
self.users = Table(
"users",
self.metadata,
Column("user_id", Integer, primary_key=True),
Column("user_name", String(40)),
)
def test_table_standalone(self):
users, engine = self.users, self.engine
event.listen(users, "before_create", DDL("mxyzptlk"))
event.listen(users, "after_create", DDL("klptzyxm"))
event.listen(users, "before_drop", DDL("xyzzy"))
event.listen(users, "after_drop", DDL("fnord"))
users.create(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" in strings
assert "klptzyxm" in strings
assert "xyzzy" not in strings
assert "fnord" not in strings
del engine.mock[:]
users.drop(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" not in strings
assert "klptzyxm" not in strings
assert "xyzzy" in strings
assert "fnord" in strings
def test_table_by_metadata(self):
metadata, users, engine = self.metadata, self.users, self.engine
event.listen(users, "before_create", DDL("mxyzptlk"))
event.listen(users, "after_create", DDL("klptzyxm"))
event.listen(users, "before_drop", DDL("xyzzy"))
event.listen(users, "after_drop", DDL("fnord"))
metadata.create_all(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" in strings
assert "klptzyxm" in strings
assert "xyzzy" not in strings
assert "fnord" not in strings
del engine.mock[:]
metadata.drop_all(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" not in strings
assert "klptzyxm" not in strings
assert "xyzzy" in strings
assert "fnord" in strings
def test_metadata(self):
metadata, engine = self.metadata, self.engine
event.listen(metadata, "before_create", DDL("mxyzptlk"))
event.listen(metadata, "after_create", DDL("klptzyxm"))
event.listen(metadata, "before_drop", DDL("xyzzy"))
event.listen(metadata, "after_drop", DDL("fnord"))
metadata.create_all(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" in strings
assert "klptzyxm" in strings
assert "xyzzy" not in strings
assert "fnord" not in strings
del engine.mock[:]
metadata.drop_all(self.engine)
strings = [str(x) for x in engine.mock]
assert "mxyzptlk" not in strings
assert "klptzyxm" not in strings
assert "xyzzy" in strings
assert "fnord" in strings
def test_conditional_constraint(self):
metadata, users = self.metadata, self.users
nonpg_mock = engines.mock_engine(dialect_name="sqlite")
pg_mock = engines.mock_engine(dialect_name="postgresql")
constraint = CheckConstraint(
"a < b", name="my_test_constraint", table=users
)
# by placing the constraint in an Add/Drop construct, the
# 'inline_ddl' flag is set to False
event.listen(
users,
"after_create",
AddConstraint(constraint).execute_if(dialect="postgresql"),
)
event.listen(
users,
"before_drop",
DropConstraint(constraint).execute_if(dialect="postgresql"),
)
metadata.create_all(bind=nonpg_mock)
strings = " ".join(str(x) for x in nonpg_mock.mock)
assert "my_test_constraint" not in strings
metadata.drop_all(bind=nonpg_mock)
strings = " ".join(str(x) for x in nonpg_mock.mock)
assert "my_test_constraint" not in strings
metadata.create_all(bind=pg_mock)
strings = " ".join(str(x) for x in pg_mock.mock)
assert "my_test_constraint" in strings
metadata.drop_all(bind=pg_mock)
strings = " ".join(str(x) for x in pg_mock.mock)
assert "my_test_constraint" in strings
@testing.combinations(("dialect",), ("callable",), ("callable_w_state",))
def test_inline_ddl_if_dialect_name(self, ddl_if_type):
nonpg_mock = engines.mock_engine(dialect_name="sqlite")
pg_mock = engines.mock_engine(dialect_name="postgresql")
metadata = MetaData()
capture_mock = Mock()
state = object()
if ddl_if_type == "dialect":
ddl_kwargs = dict(dialect="postgresql")
elif ddl_if_type == "callable":
def is_pg(ddl, target, bind, **kw):
capture_mock.is_pg(ddl, target, bind, **kw)
return kw["dialect"].name == "postgresql"
ddl_kwargs = dict(callable_=is_pg)
elif ddl_if_type == "callable_w_state":
def is_pg(ddl, target, bind, **kw):
capture_mock.is_pg(ddl, target, bind, **kw)
return kw["dialect"].name == "postgresql"
ddl_kwargs = dict(callable_=is_pg, state=state)
else:
assert False
data_col = Column("data", String)
t = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("num", Integer),
data_col,
Index("my_pg_index", data_col).ddl_if(**ddl_kwargs),
CheckConstraint("num > 5").ddl_if(**ddl_kwargs),
)
metadata.create_all(nonpg_mock)
eq_(len(nonpg_mock.mock), 1)
self.assert_compile(
nonpg_mock.mock[0],
"CREATE TABLE a (id INTEGER NOT NULL, num INTEGER, "
"data VARCHAR, PRIMARY KEY (id))",
dialect=nonpg_mock.dialect,
)
metadata.create_all(pg_mock)
eq_(len(pg_mock.mock), 2)
self.assert_compile(
pg_mock.mock[0],
"CREATE TABLE a (id SERIAL NOT NULL, num INTEGER, "
"data VARCHAR, PRIMARY KEY (id), CHECK (num > 5))",
dialect=pg_mock.dialect,
)
self.assert_compile(
pg_mock.mock[1],
"CREATE INDEX my_pg_index ON a (data)",
dialect="postgresql",
)
the_index = list(t.indexes)[0]
the_constraint = list(
c for c in t.constraints if isinstance(c, CheckConstraint)
)[0]
if ddl_if_type in ("callable", "callable_w_state"):
if ddl_if_type == "callable":
check_state = None
else:
check_state = state
eq_(
capture_mock.mock_calls,
[
mock.call.is_pg(
mock.ANY,
the_index,
mock.ANY,
state=check_state,
dialect=nonpg_mock.dialect,
compiler=None,
),
mock.call.is_pg(
mock.ANY,
the_constraint,
None,
state=check_state,
dialect=nonpg_mock.dialect,
compiler=mock.ANY,
),
mock.call.is_pg(
mock.ANY,
the_index,
mock.ANY,
state=check_state,
dialect=pg_mock.dialect,
compiler=None,
),
mock.call.is_pg(
mock.ANY,
the_constraint,
None,
state=check_state,
dialect=pg_mock.dialect,
compiler=mock.ANY,
),
],
)
@testing.requires.sqlite
def test_ddl_execute(self):
engine = create_engine("sqlite:///")
cx = engine.connect()
cx.begin()
ddl = DDL("SELECT 1")
r = cx.execute(ddl)
eq_(list(r), [(1,)])
def test_platform_escape(self):
"""test the escaping of % characters in the DDL construct."""
default_from = testing.db.dialect.statement_compiler(
testing.db.dialect, None
).default_from()
# We're abusing the DDL()
# construct here by pushing a SELECT through it
# so that we can verify the round trip.
# the DDL() will trigger autocommit, which prohibits
# some DBAPIs from returning results (pyodbc), so we
# run in an explicit transaction.
with testing.db.begin() as conn:
eq_(
conn.execute(
text("select 'foo%something'" + default_from)
).scalar(),
"foo%something",
)
eq_(
conn.execute(
DDL("select 'foo%%something'" + default_from)
).scalar(),
"foo%something",
)
| DDLExecutionTest |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 7667,
"end": 8854
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
task = CloudMemorystoreGetInstanceOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_instance.return_value = Instance(name=TEST_NAME)
task.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_instance.assert_called_once_with(
location=TEST_LOCATION,
instance=TEST_INSTANCE_NAME,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreGetInstanceOperator |
python | google__pytype | pytype/tools/traces/traces_test.py | {
"start": 567,
"end": 677
} | class ____(traces.MatchAstVisitor):
def visit_Module(self, node):
self.match(node)
| _NotImplementedVisitor |
python | openai__openai-python | src/openai/types/realtime/realtime_conversation_item_assistant_message.py | {
"start": 947,
"end": 1715
} | class ____(BaseModel):
content: List[Content]
"""The content of the message."""
role: Literal["assistant"]
"""The role of the message sender. Always `assistant`."""
type: Literal["message"]
"""The type of the item. Always `message`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| RealtimeConversationItemAssistantMessage |
python | kamyu104__LeetCode-Solutions | Python/number-of-bit-changes-to-make-two-integers-equal.py | {
"start": 357,
"end": 616
} | class ____(object):
def minChanges(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
def popcount(x):
return bin(x).count('1')
return popcount(n^k) if n|(n^k) == n else -1
| Solution2 |
python | ray-project__ray | rllib/core/testing/torch/bc_module.py | {
"start": 1699,
"end": 3888
} | class ____(TorchRLModule):
"""An example of an RLModule that uses an encoder shared with other things.
For example, we could consider a multi-agent case where for inference each agent
needs to know the global state of the environment, as well as the local state of
itself. For better representation learning we would like to share the encoder
across all the modules. So this module simply accepts the encoder object as its
input argument and uses it to encode the global state. The local state is passed
through as is. The policy head is then a simple MLP that takes the concatenation of
the global and local state as input and outputs the action logits.
"""
def __init__(
self,
encoder: nn.Module,
local_dim: int,
hidden_dim: int,
action_dim: int,
config=None,
) -> None:
super().__init__(config=config)
self.encoder = encoder
self.policy_head = nn.Sequential(
nn.Linear(hidden_dim + local_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, action_dim),
)
def get_train_action_dist_cls(self):
return TorchCategorical
def get_exploration_action_dist_cls(self):
return TorchCategorical
def get_inference_action_dist_cls(self):
return TorchCategorical
@override(RLModule)
def _default_input_specs(self):
return [("obs", "global"), ("obs", "local")]
@override(RLModule)
def _forward_inference(self, batch):
with torch.no_grad():
return self._common_forward(batch)
@override(RLModule)
def _forward_exploration(self, batch):
with torch.no_grad():
return self._common_forward(batch)
@override(RLModule)
def _forward_train(self, batch):
return self._common_forward(batch)
def _common_forward(self, batch):
obs = batch["obs"]
global_enc = self.encoder(obs["global"])
policy_in = torch.cat([global_enc, obs["local"]], dim=-1)
action_logits = self.policy_head(policy_in)
return {Columns.ACTION_DIST_INPUTS: action_logits}
| BCTorchRLModuleWithSharedGlobalEncoder |
python | huggingface__transformers | src/transformers/models/parakeet/modeling_parakeet.py | {
"start": 27649,
"end": 29439
} | class ____(ModelOutput):
"""
Outputs of Parakeet models.
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor
logits: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
hidden_states: Optional[tuple[tuple[torch.FloatTensor]]] = None
@auto_docstring(
custom_intro="""
Parakeet Encoder with a Connectionist Temporal Classification (CTC) head.
"""
)
| ParakeetGenerateOutput |
python | scipy__scipy | scipy/interpolate/tests/test_interpolate.py | {
"start": 53713,
"end": 71721
} | class ____:
def test_simple(self, xp):
c = xp.asarray([[1, 4], [2, 5], [3, 6]])
x = xp.asarray([0, 0.5, 1])
p = PPoly(c, x)
xp_assert_close(p(0.3), xp.asarray(1*0.3**2 + 2*0.3 + 3, dtype=xp.float64))
xp_assert_close(
p(0.7), xp.asarray(4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6, dtype=xp.float64)
)
def test_periodic(self, xp):
c = xp.asarray([[1, 4], [2, 5], [3, 6]])
x = xp.asarray([0, 0.5, 1])
p = PPoly(c, x, extrapolate='periodic')
xp_assert_close(p(1.3),
xp.asarray(1 * 0.3 ** 2 + 2 * 0.3 + 3, dtype=xp.float64))
xp_assert_close(
p(-0.3),
xp.asarray(4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6, dtype=xp.float64)
)
xp_assert_close(p(1.3, 1), xp.asarray(2 * 0.3 + 2, dtype=xp.float64))
xp_assert_close(p(-0.3, 1), xp.asarray(8 * (0.7 - 0.5) + 5, dtype=xp.float64))
def test_read_only(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
xnew = np.array([0, 0.1, 0.2])
PPoly(c, x, extrapolate='periodic')
for writeable in (True, False):
x.flags.writeable = writeable
c.flags.writeable = writeable
f = PPoly(c, x)
vals = f(xnew)
assert np.isfinite(vals).all()
def test_descending(self):
def binom_matrix(power):
n = np.arange(power + 1).reshape(-1, 1)
k = np.arange(power + 1)
B = binom(n, k)
return B[::-1, ::-1]
rng = np.random.RandomState(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(rng.uniform(0, 10, m + 1))
ca = rng.uniform(-2, 2, size=(power + 1, m))
h = np.diff(x)
h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
B = binom_matrix(power)
cap = ca * h_powers
cdp = np.dot(B.T, cap)
cd = cdp / h_powers
pa = PPoly(ca, x, extrapolate=True)
pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = rng.uniform(-10, 20, 100)
xp_assert_close(pa(x_test), pd(x_test), rtol=1e-13)
xp_assert_close(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
xp_assert_close(pa_d(x_test), pd_d(x_test), rtol=1e-13)
# Antiderivatives won't be equal because fixing continuity is
# done in the reverse order, but surely the differences should be
# equal.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in rng.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
xp_assert_close(int_a, int_d, rtol=1e-13)
xp_assert_close(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-13)
roots_d = pd.roots()
roots_a = pa.roots()
xp_assert_close(roots_a, np.sort(roots_d), rtol=1e-12)
def test_multi_shape(self, xp):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert p.x.shape == x.shape
assert p.c.shape == c.shape
assert p(0.3).shape == c.shape[2:]
assert p(np.random.rand(5, 6)).shape == (5, 6) + c.shape[2:]
dp = p.derivative()
assert dp.c.shape == (5, 2, 1, 2, 3)
ip = p.antiderivative()
assert ip.c.shape == (7, 2, 1, 2, 3)
def test_construct_fast(self):
np.random.seed(1234)
c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
x = np.array([0, 0.5, 1])
p = PPoly.construct_fast(c, x)
xp_assert_close(p(0.3), np.asarray(1*0.3**2 + 2*0.3 + 3))
xp_assert_close(p(0.7), np.asarray(4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6))
def test_vs_alternative_implementations(self):
rng = np.random.RandomState(1234)
c = rng.rand(3, 12, 22)
x = np.sort(np.r_[0, rng.rand(11), 1])
p = PPoly(c, x)
xp = np.r_[0.3, 0.5, 0.33, 0.6]
expected = _ppoly_eval_1(c, x, xp)
xp_assert_close(p(xp), expected)
expected = _ppoly_eval_2(c[:,:,0], x, xp)
xp_assert_close(p(xp)[:, 0], expected)
def test_from_spline(self):
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
xp_assert_close(pp(xi), splev(xi, spl))
# make sure .from_spline accepts BSpline objects
b = BSpline(*spl)
ppp = PPoly.from_spline(b)
xp_assert_close(ppp(xi), b(xi))
# BSpline's extrapolate attribute propagates unless overridden
t, c, k = spl
for extrap in (None, True, False):
b = BSpline(t, c, k, extrapolate=extrap)
p = PPoly.from_spline(b)
assert p.extrapolate == b.extrapolate
def test_from_spline_2(self, xp):
# BSpline namespace propagates to PPoly
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
t, c, k = splrep(x, y, s=0)
spl = BSpline(xp.asarray(t), xp.asarray(c), k)
pp = PPoly.from_spline(spl)
xi = xp.linspace(0, 1, 11)
xp_assert_close(pp(xi), spl(xi))
def test_derivative_simple(self, xp):
np.random.seed(1234)
c = xp.asarray([[4, 3, 2, 1]]).T
dc = xp.asarray([[3*4, 2*3, 2]]).T
ddc = xp.asarray([[2*3*4, 1*2*3]]).T
x = xp.asarray([0, 1])
pp = PPoly(c, x)
dpp = PPoly(dc, x)
ddpp = PPoly(ddc, x)
xp_assert_close(pp.derivative().c, dpp.c)
xp_assert_close(pp.derivative(2).c, ddpp.c)
def test_derivative_eval(self):
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 3):
xp_assert_close(pp(xi, dx), splev(xi, spl, dx))
def test_derivative(self):
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 10):
xp_assert_close(pp(xi, dx), pp.derivative(dx)(xi), err_msg=f"dx={dx}")
def test_antiderivative_of_constant(self):
# https://github.com/scipy/scipy/issues/4216
p = PPoly([[1.]], [0, 1])
xp_assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
xp_assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
def test_antiderivative_regression_4355(self):
# https://github.com/scipy/scipy/issues/4355
p = PPoly([[1., 0.5]], [0, 1, 2])
q = p.antiderivative()
xp_assert_equal(q.c, [[1, 0.5], [0, 1]])
xp_assert_equal(q.x, [0.0, 1, 2])
xp_assert_close(p.integrate(0, 2), np.asarray(1.5))
xp_assert_close(np.asarray(q(2) - q(0)),
np.asarray(1.5))
def test_antiderivative_simple(self, xp):
# [ p1(x) = 3*x**2 + 2*x + 1,
# p2(x) = 1.6875]
c = xp.asarray([[3, 2, 1], [0, 0, 1.6875]], dtype=xp.float64).T
# [ pp1(x) = x**3 + x**2 + x,
# pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
ic = xp.asarray([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]], dtype=xp.float64).T
# [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
# ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
iic = xp.asarray([[1/4, 1/3, 1/2, 0, 0],
[0, 0, 1.6875/2, 0.328125, 0.037434895833333336]],
dtype=xp.float64
).T
x = xp.asarray([0, 0.25, 1], dtype=xp.float64)
pp = PPoly(c, x)
ipp = pp.antiderivative()
iipp = pp.antiderivative(2)
iipp2 = ipp.antiderivative()
xp_assert_close(ipp.x, x)
xp_assert_close(ipp.c.T, ic.T)
xp_assert_close(iipp.c.T, iic.T)
xp_assert_close(iipp2.c.T, iic.T)
def test_antiderivative_vs_derivative(self):
rng = np.random.RandomState(1234)
x = np.linspace(0, 1, 30)**2
y = rng.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
ipp = pp.antiderivative(dx)
# check that derivative is inverse op
pp2 = ipp.derivative(dx)
xp_assert_close(pp.c, pp2.c)
# check continuity
for k in range(dx):
pp2 = ipp.derivative(k)
r = 1e-13
endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
xp_assert_close(
pp2(pp2.x[1:]), pp2(endpoint), rtol=1e-7, err_msg=f"dx={dx} k={k}"
)
def test_antiderivative_vs_spline(self):
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
pp2 = pp.antiderivative(dx)
spl2 = splantider(spl, dx)
xi = np.linspace(0, 1, 200)
xp_assert_close(pp2(xi), splev(xi, spl2),
rtol=1e-7)
def test_antiderivative_continuity(self):
c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
ip = p.antiderivative()
# check continuity
xp_assert_close(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
# check that only lowest order coefficients were changed
p2 = ip.derivative()
xp_assert_close(p2.c, p.c)
def test_integrate(self):
rng = np.random.RandomState(1234)
x = np.sort(np.r_[0, rng.rand(11), 1])
y = rng.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
a, b = 0.3, 0.9
ig = pp.integrate(a, b)
ipp = pp.antiderivative()
xp_assert_close(ig, ipp(b) - ipp(a), check_0d=False)
xp_assert_close(ig, splint(a, b, spl), check_0d=False)
a, b = -0.3, 0.9
ig = pp.integrate(a, b, extrapolate=True)
xp_assert_close(ig, ipp(b) - ipp(a), check_0d=False)
assert np.isnan(pp.integrate(a, b, extrapolate=False)).all()
def test_integrate_readonly(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
for writeable in (True, False):
x.flags.writeable = writeable
P = PPoly(c, x)
vals = P.integrate(1, 4)
assert np.isfinite(vals).all()
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = PPoly(c, x, extrapolate='periodic')
I = P.antiderivative()
period_int = np.asarray(I(4) - I(1))
xp_assert_close(P.integrate(1, 4), period_int)
xp_assert_close(P.integrate(-10, -7), period_int)
xp_assert_close(P.integrate(-10, -4), np.asarray(2 * period_int))
xp_assert_close(P.integrate(1.5, 2.5),
np.asarray(I(2.5) - I(1.5)))
xp_assert_close(P.integrate(3.5, 5),
np.asarray(I(2) - I(1) + I(4) - I(3.5)))
xp_assert_close(P.integrate(3.5 + 12, 5 + 12),
np.asarray(I(2) - I(1) + I(4) - I(3.5)))
xp_assert_close(P.integrate(3.5, 5 + 12),
np.asarray(I(2) - I(1) + I(4) - I(3.5) + 4 * period_int))
xp_assert_close(P.integrate(0, -1),
np.asarray(I(2) - I(3)))
xp_assert_close(P.integrate(-9, -10),
np.asarray(I(2) - I(3)))
xp_assert_close(P.integrate(0, -10),
np.asarray(I(2) - I(3) - 3 * period_int))
def test_roots(self):
x = np.linspace(0, 1, 31)**2
y = np.sin(30*x)
spl = splrep(x, y, s=0, k=3)
pp = PPoly.from_spline(spl)
r = pp.roots()
r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
xp_assert_close(r, sproot(spl), atol=1e-15)
def test_roots_idzero(self):
# Roots for piecewise polynomials with identically zero
# sections.
c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
x = np.array([0, 0.4, 0.6, 1.0])
pp = PPoly(c, x)
xp_assert_equal(pp.roots(),
[0.25, 0.4, np.nan, 0.6 + 0.25])
# ditto for p.solve(const) with sections identically equal const
const = 2.
c1 = c.copy()
c1[1, :] += const
pp1 = PPoly(c1, x)
xp_assert_equal(pp1.solve(const),
[0.25, 0.4, np.nan, 0.6 + 0.25])
def test_roots_all_zero(self):
# test the code path for the polynomial being identically zero everywhere
c = [[0], [0]]
x = [0, 1]
p = PPoly(c, x)
xp_assert_equal(p.roots(), [0, np.nan])
xp_assert_equal(p.solve(0), [0, np.nan])
xp_assert_equal(p.solve(1), [])
c = [[0, 0], [0, 0]]
x = [0, 1, 2]
p = PPoly(c, x)
xp_assert_equal(p.roots(), [0, np.nan, 1, np.nan])
xp_assert_equal(p.solve(0), [0, np.nan, 1, np.nan])
xp_assert_equal(p.solve(1), [])
def test_roots_repeated(self):
# Check roots repeated in multiple sections are reported only
# once.
# [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
c = np.array([[1, 0, -1], [-1, 0, 0]]).T
x = np.array([-1, 0, 1])
pp = PPoly(c, x)
xp_assert_equal(pp.roots(), np.asarray([-2.0, 0.0]))
xp_assert_equal(pp.roots(extrapolate=False), np.asarray([0.0]))
def test_roots_discont(self):
# Check that a discontinuity across zero is reported as root
c = np.array([[1], [-1]]).T
x = np.array([0, 0.5, 1])
pp = PPoly(c, x)
xp_assert_equal(pp.roots(), np.asarray([0.5]))
xp_assert_equal(pp.roots(discontinuity=False), np.asarray([]))
# ditto for a discontinuity across y:
xp_assert_equal(pp.solve(0.5), np.asarray([0.5]))
xp_assert_equal(pp.solve(0.5, discontinuity=False), np.asarray([]))
xp_assert_equal(pp.solve(1.5), np.asarray([]))
xp_assert_equal(pp.solve(1.5, discontinuity=False), np.asarray([]))
def test_roots_random(self):
# Check high-order polynomials with random coefficients
rng = np.random.RandomState(1234)
num = 0
for extrapolate in (True, False):
for order in range(0, 20):
x = np.unique(np.r_[0, 10 * rng.rand(30), 10])
c = 2*rng.rand(order+1, len(x)-1, 2, 3) - 1
pp = PPoly(c, x)
for y in [0, rng.random()]:
r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
for i in range(2):
for j in range(3):
rr = r[i,j]
if rr.size > 0:
# Check that the reported roots indeed are roots
num += rr.size
val = pp(rr, extrapolate=extrapolate)[:,i,j]
cmpval = pp(rr, nu=1,
extrapolate=extrapolate)[:,i,j]
msg = f"({extrapolate!r}) r = {repr(rr)}"
xp_assert_close((val-y) / cmpval, np.asarray(0.0),
atol=1e-7,
err_msg=msg, check_shape=False)
# Check that we checked a number of roots
assert num > 100, repr(num)
def test_roots_croots(self):
# Test the complex root finding algorithm
rng = np.random.RandomState(1234)
for k in range(1, 15):
c = rng.rand(k, 1, 130)
if k == 3:
# add a case with zero discriminant
c[:,0,0] = 1, 2, 1
for y in [0, rng.random()]:
w = np.empty(c.shape, dtype=complex)
_ppoly._croots_poly1(c, w, y)
if k == 1:
assert np.isnan(w).all()
continue
res = -y
cres = 0
for i in range(k):
res += c[i,None] * w**(k-1-i)
cres += abs(c[i,None] * w**(k-1-i))
with np.errstate(invalid='ignore'):
res /= cres
res = res.ravel()
res = res[~np.isnan(res)]
xp_assert_close(res, np.zeros_like(res), atol=1e-10)
def test_extrapolate_attr(self):
# [ 1 - x**2 ]
c = np.array([[-1, 0, 1]]).T
x = np.array([0, 1])
for extrapolate in [True, False, None]:
pp = PPoly(c, x, extrapolate=extrapolate)
pp_d = pp.derivative()
pp_i = pp.antiderivative()
if extrapolate is False:
assert np.isnan(pp([-0.1, 1.1])).all()
assert np.isnan(pp_i([-0.1, 1.1])).all()
assert np.isnan(pp_d([-0.1, 1.1])).all()
assert pp.roots() == [1]
else:
xp_assert_close(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
assert not np.isnan(pp_i([-0.1, 1.1])).any()
assert not np.isnan(pp_d([-0.1, 1.1])).any()
xp_assert_close(pp.roots(), np.asarray([1.0, -1.0]))
@make_xp_test_case(BPoly)
| TestPPoly |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dynamic.py | {
"start": 2371,
"end": 3450
} | class ____(_WriteOnlyAttributeImpl):
_supports_dynamic_iteration = True
collection_history_cls = DynamicCollectionHistory[Any]
query_class: Type[_AppenderMixin[Any]] # type: ignore[assignment]
def __init__(
self,
class_: Union[Type[Any], AliasedClass[Any]],
key: str,
dispatch: _Dispatch[QueryableAttribute[Any]],
target_mapper: Mapper[_T],
order_by: _RelationshipOrderByArg,
query_class: Optional[Type[_AppenderMixin[_T]]] = None,
**kw: Any,
) -> None:
attributes._AttributeImpl.__init__(
self, class_, key, None, dispatch, **kw
)
self.target_mapper = target_mapper
if order_by:
self.order_by = tuple(order_by)
if not query_class:
self.query_class = AppenderQuery
elif _AppenderMixin in query_class.mro():
self.query_class = query_class
else:
self.query_class = mixin_user_query(query_class)
@relationships.RelationshipProperty.strategy_for(lazy="dynamic")
| _DynamicAttributeImpl |
python | pandas-dev__pandas | pandas/tests/indexes/categorical/test_indexing.py | {
"start": 220,
"end": 4991
} | class ____:
def test_take_fill_value(self):
# GH 12631
# numeric category
idx = CategoricalIndex([1, 2, 3], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = CategoricalIndex([2, 1, 3], name="xxx")
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = CategoricalIndex([2, 1, 3], name="xxx")
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# object category
idx = CategoricalIndex(
list("CBA"), categories=list("ABC"), ordered=True, name="xxx"
)
result = idx.take(np.array([1, 0, -1]))
expected = CategoricalIndex(
list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
)
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = CategoricalIndex(
["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"
)
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = CategoricalIndex(
list("BCA"), categories=list("ABC"), ordered=True, name="xxx"
)
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
msg = (
"When allow_fill=True and fill_value is not None, all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
def test_take_fill_value_datetime(self):
# datetime category
idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
idx = CategoricalIndex(idx)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
expected = CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])
expected = CategoricalIndex(expected, categories=exp_cats)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
expected = CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
idx = CategoricalIndex([1, 2, 3], name="foo")
indices = [1, 0, -1]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
| TestTake |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.