language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | lib/ansible/plugins/vars/__init__.py | {
"start": 957,
"end": 1331
} | class ____(AnsiblePlugin):
"""
Loads variables for groups and/or hosts
"""
is_stateless = False
def __init__(self):
""" constructor """
super(BaseVarsPlugin, self).__init__()
self._display = display
def get_vars(self, loader, path, entities):
""" Gets variables. """
self._basedir = basedir(path)
| BaseVarsPlugin |
python | facebook__pyre-check | client/command_arguments.py | {
"start": 1498,
"end": 1636
} | class ____(str, enum.Enum):
_value_: str
NONE = "none"
CLIENT = "client"
CLIENT_AND_BINARY = "client_and_binary"
| VersionKind |
python | Netflix__metaflow | test/core/tests/project_branch.py | {
"start": 72,
"end": 904
} | class ____(MetaflowTest):
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
HEADER = """
import os
os.environ['METAFLOW_BRANCH'] = 'this_is_a_test_branch'
@project(name='project_branch')
"""
@steps(0, ["singleton"], required=True)
def step_single(self):
pass
@steps(1, ["all"])
def step_all(self):
from metaflow import current
assert_equals(current.branch_name, "test.this_is_a_test_branch")
assert_equals(
current.project_flow_name,
"project_branch.test.this_is_a_test_branch.ProjectBranchTestFlow",
)
| ProjectBranchTest |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 97827,
"end": 104155
} | class ____:
async def test_can_not_nonblocking_pause_subflows(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state.state_details = states.StateDetails(pause_reschedule=True)
ctx.run.parent_task_run_id == uuid4()
state_protection = HandlePausingFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ABORT
async def test_can_not_nonblocking_pause_flows_with_deployments_with_reschedule_flag(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state.state_details = states.StateDetails(pause_reschedule=True)
state_protection = HandlePausingFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ABORT
async def test_can_nonblocking_pause_flows_with_deployments_with_reschedule_flag(
self,
session,
initialize_orchestration,
deployment,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state.state_details = states.StateDetails(pause_reschedule=True)
ctx.run.deployment_id = deployment.id
state_protection = HandlePausingFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
async def test_updates_pause_key_tracker(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state = states.Paused(pause_key="hello", timeout_seconds=1000)
async with HandlePausingFlows(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
assert "hello" in ctx.run.empirical_policy.pause_keys
async def test_defaults_pause_key_to_random_uuid(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state = states.Paused(pause_key="hello", timeout_seconds=1000)
assert len(ctx.run.empirical_policy.pause_keys) == 0
async with HandlePausingFlows(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
assert len(ctx.run.empirical_policy.pause_keys) == 1
async def test_does_not_permit_repeat_pauses(
self,
session,
initialize_orchestration,
):
initial_state_type = states.StateType.RUNNING
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state = states.Paused(pause_key="hello", timeout_seconds=1000)
async with HandlePausingFlows(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
ctx2 = await initialize_orchestration(
session,
"flow",
*intended_transition,
run_override=ctx.run,
)
ctx2.proposed_state = states.Paused(pause_key="hello", timeout_seconds=1000)
async with HandlePausingFlows(ctx2, *intended_transition) as ctx2:
await ctx2.validate_proposed_state()
assert ctx2.response_status == SetStateStatus.REJECT
assert ctx2.validated_state.type == states.StateType.RUNNING
@pytest.mark.parametrize("initial_state_type", ALL_ORCHESTRATION_STATES)
async def test_can_only_pause_running_flows(
self,
session,
initial_state_type,
initialize_orchestration,
deployment,
):
proposed_state_type = states.StateType.PAUSED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.run.deployment_id = deployment.id
state_protection = HandlePausingFlows(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
if ctx.initial_state is None:
assert ctx.response_status == SetStateStatus.ABORT
elif ctx.initial_state.is_running():
assert ctx.response_status == SetStateStatus.ACCEPT
else:
assert ctx.response_status == SetStateStatus.REJECT
assert ctx.validated_state_type == initial_state_type
| TestPausingFlows |
python | wandb__wandb | wandb/vendor/pygments/lexers/markup.py | {
"start": 15133,
"end": 15561
} | class ____(MozPreprocHashLexer):
"""
Lexer for Mozilla Preprocessor files (with '%' as the marker).
Other data is left untouched.
.. versionadded:: 2.0
"""
name = 'mozpercentpreproc'
aliases = [name]
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^%', Comment.Preproc, ('expr', 'exprstart')),
(r'.+', Other),
],
}
| MozPreprocPercentLexer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/vertex_ai/test_experiment_service.py | {
"start": 5681,
"end": 6740
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("ExperimentRunHook"))
def test_execute(self, mock_hook):
op = UpdateExperimentRunStateOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
new_state=TEST_TARGET_STATE,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.update_experiment_run_state.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
new_state=TEST_TARGET_STATE,
)
| TestVertexAIUpdateExperimentRunStateOperator |
python | django__django | django/contrib/gis/gdal/error.py | {
"start": 227,
"end": 1575
} | class ____(Exception):
pass
# #### GDAL/OGR error checking codes and routine ####
# OGR Error Codes
OGRERR_DICT = {
1: (GDALException, "Not enough data."),
2: (GDALException, "Not enough memory."),
3: (GDALException, "Unsupported geometry type."),
4: (GDALException, "Unsupported operation."),
5: (GDALException, "Corrupt data."),
6: (GDALException, "OGR failure."),
7: (SRSException, "Unsupported SRS."),
8: (GDALException, "Invalid handle."),
}
# CPL Error Codes
# https://gdal.org/api/cpl.html#cpl-error-h
CPLERR_DICT = {
1: (GDALException, "AppDefined"),
2: (GDALException, "OutOfMemory"),
3: (GDALException, "FileIO"),
4: (GDALException, "OpenFailed"),
5: (GDALException, "IllegalArg"),
6: (GDALException, "NotSupported"),
7: (GDALException, "AssertionFailed"),
8: (GDALException, "NoWriteAccess"),
9: (GDALException, "UserInterrupt"),
10: (GDALException, "ObjectNull"),
}
ERR_NONE = 0
def check_err(code, cpl=False):
"""
Check the given CPL/OGRERR and raise an exception where appropriate.
"""
err_dict = CPLERR_DICT if cpl else OGRERR_DICT
if code == ERR_NONE:
return
elif code in err_dict:
e, msg = err_dict[code]
raise e(msg)
else:
raise GDALException('Unknown error code: "%s"' % code)
| SRSException |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_custom_report.py | {
"start": 29079,
"end": 35144
} | class ____(BaseTest):
start_date = "2024-01-01"
stream_name = "custom_report"
records_number = 7
report_file = "custom_report_hour_of_day"
custom_report_aggregation = "HourOfDay"
@property
def _config(self) -> dict[str, Any]:
return (
ConfigBuilder()
.with_reports_start_date(self.start_date)
.with_custom_reports(
[
{
"name": self.stream_name,
"reporting_object": "AgeGenderAudienceReportRequest",
"report_columns": [
"AccountName",
"AccountNumber",
"AccountId",
"CampaignName",
"CampaignId",
"AdGroupName",
"AdGroupId",
"AdDistribution",
"AgeGroup",
"Gender",
"Impressions",
"Clicks",
"Conversions",
"Spend",
"Revenue",
"ExtendedCost",
"Assists",
"Language",
"AccountStatus",
"CampaignStatus",
"AdGroupStatus",
"BaseCampaignId",
"AllConversions",
"AllRevenue",
"ViewThroughConversions",
"Goal",
"GoalType",
"AbsoluteTopImpressionRatePercent",
"TopImpressionRatePercent",
"ConversionsQualified",
"AllConversionsQualified",
"ViewThroughConversionsQualified",
"ViewThroughRevenue",
],
"report_aggregation": self.custom_report_aggregation,
}
]
)
.build()
)
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "HourOfDay", "Columns": ["AccountName", "AccountNumber", "AccountId", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue", "TimePeriod"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
@freeze_time("2024-05-06")
def test_return_records_from_given_csv_file(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, self.report_file)
assert len(output.records) == self.records_number
@freeze_time("2024-05-06")
def test_return_records_from_given_csv_file_transform_record(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config, self.report_file)
assert len(output.records) == self.records_number
for record in output.records:
record = record.record.data
assert record["TimePeriod"] == "2024-05-06"
assert record["HourOfDay"]
assert record["StartOfTimePeriod"] == self.start_date
assert record["EndOfTimePeriod"] == "2024-05-06"
@freeze_time("2024-05-06")
def test_return_records_incrementally_from_given_csv_file(self):
self.mock_report_apis()
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, self.report_file)
assert len(output.records) == self.records_number
# state is not updated as records don't have a cursor field
assert output.most_recent_state.stream_state.state["TimePeriod"] == "2024-05-06"
@freeze_time("2024-05-06")
def test_return_records_incrementally_with_state_from_given_csv_file(self):
self.mock_report_apis()
state = StateBuilder().with_stream_state(self.stream_name, {"TimePeriod": self.start_date}).build()
output = self.read_stream(self.stream_name, SyncMode.incremental, self._config, self.report_file, state)
assert len(output.records) == self.records_number
# state is not updated as records don't have a cursor field
assert output.most_recent_state.stream_state.state["TimePeriod"] == "2024-05-06"
| CustomReportHourOfDay |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nn_functional.py | {
"start": 36157,
"end": 39031
} | class ____(Operator):
"""Operator for torch.nn.functional.scaled_dot_product_attention."""
def __init__(self):
super().__init__("torch.nn.functional.scaled_dot_product_attention")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.scaled_dot_product_attention"
def can_produce(self, output_spec: Spec) -> bool:
"""Scaled dot product attention can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
# SDPA needs at least 3 dimensions (batch, seq_len, embed_dim)
if len(output_spec.size) < 3:
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for scaled_dot_product_attention.
SDPA requires:
- query: (batch, seq_len, embed_dim) or (batch, num_heads, seq_len, head_dim)
- key: (batch, seq_len, embed_dim) or (batch, num_heads, seq_len_kv, head_dim)
- value: (batch, seq_len, embed_dim) or (batch, num_heads, seq_len_kv, head_dim)
Output shape matches query shape.
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError(
"ScaledDotProductAttentionOperator can only produce TensorSpec outputs"
)
if len(output_spec.size) < 3:
raise ValueError("SDPA output must have at least 3 dimensions")
# Query has the same shape as output
query_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
# Key and value: match query shape for simplicity
# In practice, seq_len for key/value can differ, but we'll keep it simple
key_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
value_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
return [query_spec, key_spec, value_spec]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for scaled_dot_product_attention operation."""
if len(input_names) != 3:
raise ValueError("SDPA requires exactly 3 inputs: query, key, value")
# Ensure dtype compatibility by converting all inputs to the expected output dtype
target_dtype = str(output_spec.dtype)
query_name, key_name, value_name = input_names
return f"{output_name} = torch.nn.functional.scaled_dot_product_attention({query_name}.to({target_dtype}), {key_name}.to({target_dtype}), {value_name}.to({target_dtype}))"
| ScaledDotProductAttentionOperator |
python | pytorch__pytorch | torch/_export/serde/serialize.py | {
"start": 5498,
"end": 14737
} | class ____(dict):
"""
Dictionary class for deferred instantiation of node metadata values.
Purpose is to avoid creation of symbolic-shape tensors before relevant shape guards are parsed.
"""
def __init__(self):
self.map = {}
self.evaluated = set()
def __setitem__(self, k, v):
self.map[k] = v
def __getitem__(self, k):
out = self.map[k]
if k in self.evaluated:
return out
self.evaluated.add(k)
self.map[k] = out()
return self.map[k]
def __repr__(self):
return self.map.__repr__()
def deserialize_device(d: Device) -> torch.device:
if d.index is None:
return torch.device(type=d.type) # type: ignore[call-overload]
return torch.device(type=d.type, index=d.index)
def deserialize_size(sizes: Sequence[SymInt]) -> tuple[int, ...]:
for sym_int_size in sizes:
assert sym_int_size.type == "as_int", (
f"Only as_int is supported, got {sym_int_size.type}"
)
return tuple(sym_int_size.as_int for sym_int_size in sizes)
def deserialize_stride(strides: Sequence[SymInt]) -> tuple[int, ...]:
for sym_int_stride in strides:
assert sym_int_stride.type == "as_int", (
f"Only as_int is supported, got {sym_int_stride.type}"
)
return tuple(sym_int_stride.as_int for sym_int_stride in strides)
def deserialize_scalar_type(st: ScalarType) -> torch.dtype:
return _SERIALIZE_TO_TORCH_DTYPE[st]
def deserialize_storage_offset(offset: SymInt) -> int:
assert offset.type == "as_int", f"Only as_int is supported, got {offset.type}"
return offset.as_int
def _print_sympy(s: Union[torch.SymInt, torch.SymBool, torch.SymFloat, sympy.Expr]):
if isinstance(s, (torch.SymInt, torch.SymBool, torch.SymFloat)):
s = s.node.expr
return sympy.printing.repr.srepr(s)
def serialize_sym_int(s: Union[int, torch.SymInt]) -> SymInt:
if isinstance(s, (torch.SymInt, sympy.Symbol, int)):
if symbolic_shapes.is_concrete_int(s):
return SymInt.create(as_int=int(s))
else:
assert isinstance(s, (torch.SymInt, sympy.Symbol))
if s.node.hint is None:
return SymInt.create(as_expr=SymExpr(_print_sympy(s)))
else:
return SymInt.create(
as_expr=SymExpr(
_print_sympy(s),
hint=SymExprHint.create(as_int=s.node.hint),
)
)
else:
raise SerializeError(
f"SymInt should be either symbol or int, got `{s}` of type `{type(s)}`"
)
def serialize_sym_float(s: Union[float, torch.SymFloat]) -> SymFloat:
if isinstance(s, (torch.SymFloat, sympy.Symbol, float)):
if symbolic_shapes.is_concrete_float(s):
return SymFloat.create(as_float=float(s))
else:
assert isinstance(s, (torch.SymFloat, sympy.Symbol))
if s.node.hint is None:
return SymFloat.create(as_expr=SymExpr(_print_sympy(s)))
else:
return SymFloat.create(
as_expr=SymExpr(
_print_sympy(s),
hint=SymExprHint.create(as_float=s.node.hint),
)
)
else:
raise SerializeError(
f"SymFloat should be either symbol or float, got `{s}` of type `{type(s)}`"
)
def serialize_sym_bool(s: Union[bool, torch.SymBool]) -> SymBool:
if isinstance(s, (torch.SymBool, bool)):
if symbolic_shapes.is_concrete_bool(s):
return SymBool.create(as_bool=bool(s))
else:
return SymBool.create(as_expr=SymExpr(expr_str=_print_sympy(s)))
else:
raise SerializeError(
f"SymBool should be either symbol or bool, got `{s}` of type `{type(s)}`"
)
def serialize_tensor_meta(t: torch.Tensor) -> TensorMeta:
"""
Extract a TensorMeta describing `t`.
"""
return TensorMeta(
dtype=_TORCH_TO_SERIALIZE_DTYPE[t.dtype],
sizes=[serialize_sym_int(s) for s in t.shape],
requires_grad=t.requires_grad,
device=Device(type=t.device.type, index=t.device.index),
strides=[serialize_sym_int(s) for s in t.stride()],
storage_offset=serialize_sym_int(t.storage_offset()),
layout=_TORCH_TO_SERIALIZE_LAYOUT[t.layout],
)
_CURRENT_DESERIALIZER: Optional["GraphModuleDeserializer"] = None
def _reduce_fake_tensor(fake_tensor: FakeTensor):
is_parameter = isinstance(fake_tensor, torch.nn.Parameter)
tensor_meta = serialize_tensor_meta(fake_tensor)
tensor_meta_bytes = json.dumps(
_dataclass_to_dict(tensor_meta), cls=EnumEncoder
).encode("utf-8")
return _reconstruct_fake_tensor, (tensor_meta_bytes, is_parameter)
def _reconstruct_fake_tensor(
serialized_tensor_meta: bytes, is_parameter: bool
) -> FakeTensor:
# Deserialize the bytes into a TensorMeta
json_tensor_meta = json.loads(serialized_tensor_meta.decode("utf-8"))
tensor_meta = _dict_to_dataclass(TensorMeta, json_tensor_meta)
# Find the current fake mode
assert _CURRENT_DESERIALIZER is not None, (
"Need access to current deserializer state"
)
fake_tensor = _CURRENT_DESERIALIZER.deserialize_tensor_meta(tensor_meta)
if is_parameter:
fake_tensor = torch.nn.Parameter(fake_tensor) # type: ignore[assignment]
# pyrefly: ignore [bad-return]
return fake_tensor
def serialize_torch_artifact(
artifact: Optional[Any], pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL
) -> bytes:
if artifact is None:
return b""
assert FakeTensor not in copyreg.dispatch_table, (
"Refusing to stomp on existing FakeTensor reducer"
)
try:
copyreg.pickle(FakeTensor, _reduce_fake_tensor)
buffer = io.BytesIO()
# This is a workaround for backend's tensor deserialization problem:
# unpickleTensor() always create a tensor on the device where it was originally saved
# This behavior is bad for multi-gpu training, as we wish to directly load the tensor
# on the designated device.
# For now, we simply move the tensor to cpu before saving.
# TODO: this should be fixed by deserialization instead.
torch.save(artifact, buffer, pickle_protocol=pickle_protocol)
return buffer.getvalue()
finally:
del copyreg.dispatch_table[FakeTensor]
def deserialize_torch_artifact(
serialized: Union[dict[str, Any], tuple[Any, ...], bytes],
):
if isinstance(serialized, (dict, tuple)):
return serialized
if len(serialized) == 0:
return {}
buffer = io.BytesIO(serialized)
buffer.seek(0)
# weights_only=False as we want to load custom objects here (e.g. ScriptObject)
try:
artifact = torch.load(buffer, weights_only=True)
except Exception as e:
buffer.seek(0)
artifact = torch.load(buffer, weights_only=False)
log.warning(
"Fallback to weights_only=False succeeded. "
"Loaded object of type %s after initial failure: %s",
type(artifact),
exc_info=e,
)
assert isinstance(artifact, (tuple, dict))
return artifact
def _sympy_int_to_int(val: sympy.Expr, adjust: str) -> Optional[int]:
# Convert simple sympy Integers into concrete int
if val in (sympy.oo, int_oo):
return None
if val in (-sympy.oo, -int_oo):
return None
if isinstance(val, sympy.Integer):
return int(val)
# TODO: Remove this adjustment when Ed gets rid of fractional ranges
log.warning(
"Export constraints cannot be non-integer expressions. Found "
"type %s, and value %s. We will attempt to %s "
"this value.",
type(val),
val,
adjust,
)
if adjust == "floor":
return math.floor(val)
elif adjust == "ceil":
return math.ceil(val)
else:
raise RuntimeError(f"Got invalid adjustment {adjust}")
def _int_to_sympy_int(val: Optional[int], default) -> sympy.Expr:
# Convert concrete int into simple sympy Integers
if val is None:
return default
if val in [-int_oo, int_oo]:
return val
if val == math.inf:
return int_oo
if val == -math.inf:
return -int_oo
return sympy.Integer(val)
def _symbol_index(sym: sympy.Symbol, sym_type: SymT):
return int(str(sym)[len(prefix_str[sym_type]) :])
def serialize_range_constraints(
range_constraints: dict[sympy.Symbol, ValueRanges],
) -> dict[str, RangeConstraint]:
return {
str(k): RangeConstraint(
_sympy_int_to_int(v.lower, "ceil"), # type: ignore[arg-type]
_sympy_int_to_int(v.upper, "floor"), # type: ignore[arg-type]
)
for k, v in range_constraints.items()
}
def _get_schema_from_target(target):
if isinstance(target, torch._ops.OpOverload):
return target._schema
elif type(target) in _serialization_registry:
return _serialization_registry[type(target)].op_schema(target)
raise RuntimeError(f"Cannot find schema for {type(target)}")
@dataclass
| LazyMap |
python | PrefectHQ__prefect | src/prefect/artifacts.py | {
"start": 9341,
"end": 10280
} | class ____(Artifact):
table: Union[dict[str, list[Any]], list[dict[str, Any]], list[list[Any]]]
type: Optional[str] = "table"
@classmethod
def _sanitize(
cls, item: dict[str, Any] | list[Any] | float
) -> dict[str, Any] | list[Any] | int | float | None:
"""
Sanitize NaN values in a given item.
The item can be a dict, list or float.
"""
if isinstance(item, list):
return [cls._sanitize(sub_item) for sub_item in item]
elif isinstance(item, dict):
return {k: cls._sanitize(v) for k, v in item.items()}
elif isinstance(item, float) and math.isnan(item):
return None
else:
return item
async def aformat(self) -> str:
return json.dumps(self._sanitize(self.table))
@async_dispatch(aformat)
def format(self) -> str:
return json.dumps(self._sanitize(self.table))
| TableArtifact |
python | kubernetes-client__python | kubernetes/client/models/v1_client_ip_config.py | {
"start": 383,
"end": 3945
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'timeout_seconds': 'int'
}
attribute_map = {
'timeout_seconds': 'timeoutSeconds'
}
def __init__(self, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
"""V1ClientIPConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._timeout_seconds = None
self.discriminator = None
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this V1ClientIPConfig. # noqa: E501
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
:return: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1ClientIPConfig.
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClientIPConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClientIPConfig):
return True
return self.to_dict() != other.to_dict()
| V1ClientIPConfig |
python | Pylons__pyramid | tests/test_config/test_assets.py | {
"start": 37365,
"end": 37899
} | class ____:
def __call__(self, package, path, source, _info=''):
self.package = package
self.path = path
self.source = source
def read_(src):
with open(src, 'rb') as f:
contents = f.read()
return contents
def _assertBody(body, filename):
# strip both \n and \r for windows
body = body.replace(b'\r', b'')
body = body.replace(b'\n', b'')
data = read_(filename)
data = data.replace(b'\r', b'')
data = data.replace(b'\n', b'')
assert body == data
| DummyUnderOverride |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/bind_override.py | {
"start": 494,
"end": 985
} | class ____(App):
BINDINGS = [
Binding("space", "app.bell", "Bell (App)"),
Binding("c", "app.notify('c app')", "app"),
Binding("a", "app.notify('a app')", "app"),
Binding("b", "app.notify('b app')", "app"),
]
def compose(self) -> ComposeResult:
yield MyWidget()
yield Switch()
yield Footer()
def action_notify(self, msg: str):
self.notify(msg)
if __name__ == "__main__":
app = BindApp()
app.run()
| BindApp |
python | coleifer__peewee | tests/regressions.py | {
"start": 51311,
"end": 51431
} | class ____(TestModel):
id = IntegerField(primary_key=True)
cpk = ForeignKeyField(CharPK, field=CharPK.name)
| CharFK |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 122372,
"end": 125470
} | class ____(TypedDict, total=False):
body: str
trace_id: str
replay_id: str
severity_text: str
severity_number: int
trace_flags: int
item_id: int
def scalar_to_any_value(value: Any) -> AnyValue:
if isinstance(value, str):
return AnyValue(string_value=value)
if isinstance(value, int):
return AnyValue(int_value=value)
if isinstance(value, float):
return AnyValue(double_value=value)
if isinstance(value, bool):
return AnyValue(bool_value=value)
if isinstance(value, dict):
return AnyValue(**value)
raise Exception(f"cannot convert {value} of type {type(value)} to AnyValue")
def span_to_trace_item(span) -> TraceItem:
client_sample_rate = 1.0
server_sample_rate = 1.0
attributes = {}
for field in {"tags", "data"}:
for k, v in span.get(field, {}).items():
if v is None:
continue
attributes[k] = scalar_to_any_value(v)
for k, v in span.get("sentry_tags", {}).items():
if v is None:
continue
if k == "description":
k = "normalized_description"
attributes[f"sentry.{k}"] = scalar_to_any_value(v)
for k, v in span.get("measurements", {}).items():
if v is None or v["value"] is None:
continue
if k == "client_sample_rate":
client_sample_rate = v["value"]
elif k == "server_sample_rate":
server_sample_rate = v["value"]
else:
attributes[k] = scalar_to_any_value(float(v["value"]))
if "description" in span and span["description"] is not None:
description = scalar_to_any_value(span["description"])
attributes["sentry.raw_description"] = description
for field in {
"duration_ms",
"end_timestamp_precise",
"event_id",
"exclusive_time_ms",
"is_segment",
"parent_span_id",
"profile_id",
"received",
"segment_id",
"start_timestamp_precise",
}:
if field in span and span[field] is not None:
if field == "is_segment":
is_segment = span["is_segment"]
attributes["sentry.is_segment"] = AnyValue(
double_value=float(is_segment),
)
else:
value = scalar_to_any_value(span[field])
attributes[f"sentry.{field}"] = value
timestamp = Timestamp()
timestamp.FromMilliseconds(span["start_timestamp_ms"])
return TraceItem(
organization_id=span["organization_id"],
project_id=span["project_id"],
item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
timestamp=timestamp,
trace_id=span["trace_id"],
item_id=int(span["span_id"], 16).to_bytes(
16,
byteorder="little",
signed=False,
),
received=timestamp,
retention_days=90,
attributes=attributes,
client_sample_rate=client_sample_rate,
server_sample_rate=server_sample_rate,
)
| _OptionalOurLogData |
python | tensorflow__tensorflow | tensorflow/python/data/ops/readers.py | {
"start": 19464,
"end": 21807
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` of fixed-length records from one or more binary files."""
def __init__(self,
filenames,
record_bytes,
header_bytes=None,
footer_bytes=None,
buffer_size=None,
compression_type=None,
name=None):
"""Creates a `FixedLengthRecordDataset`.
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_bytes: A `tf.int64` scalar representing the number of bytes in each
record.
header_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to skip at the start of a file.
footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
bytes to ignore at the end of a file.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes to buffer when reading.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
name: (Optional.) A name for the tf.data operation.
"""
self._filenames = filenames
self._record_bytes = ops.convert_to_tensor(
record_bytes, dtype=dtypes.int64, name="record_bytes")
self._header_bytes = convert.optional_param_to_tensor(
"header_bytes", header_bytes)
self._footer_bytes = convert.optional_param_to_tensor(
"footer_bytes", footer_bytes)
self._buffer_size = convert.optional_param_to_tensor(
"buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
self._compression_type = convert.optional_param_to_tensor(
"compression_type",
compression_type,
argument_default="",
argument_dtype=dtypes.string)
self._name = name
variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2(
self._filenames,
self._header_bytes,
self._record_bytes,
self._footer_bytes,
self._buffer_size,
self._compression_type,
metadata=self._metadata.SerializeToString())
super(_FixedLengthRecordDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
@tf_export("data.FixedLengthRecordDataset", v1=[])
| _FixedLengthRecordDataset |
python | tensorflow__tensorflow | tensorflow/python/training/proximal_gradient_descent.py | {
"start": 1101,
"end": 4517
} | class ____(optimizer.Optimizer):
# pylint: disable=line-too-long
"""Optimizer that implements the proximal gradient descent algorithm.
References:
Efficient Learning using Forward-Backward Splitting:
[Duchi et al., 2009](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting)
([pdf](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf))
"""
def __init__(self, learning_rate, l1_regularization_strength=0.0,
l2_regularization_strength=0.0, use_locking=False,
name="ProximalGradientDescent"):
"""Construct a new proximal gradient descent optimizer.
Args:
learning_rate: A Tensor or a floating point value. The learning
rate to use.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If True use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "GradientDescent".
"""
super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
def _apply_dense(self, grad, var):
return gen_training_ops.apply_proximal_gradient_descent(
var,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
return gen_training_ops.resource_apply_proximal_gradient_descent(
var.handle,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
return gen_training_ops.sparse_apply_proximal_gradient_descent(
var,
self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad.values,
grad.indices,
use_locking=self._use_locking).op
def _resource_apply_sparse(self, grad, var, indices):
return gen_training_ops.resource_sparse_apply_proximal_gradient_descent(
var.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength, name="l2_regularization_strength")
| ProximalGradientDescentOptimizer |
python | networkx__networkx | networkx/algorithms/tests/test_summarization.py | {
"start": 16598,
"end": 19030
} | class ____(AbstractSNAP):
def build_original_graph(self):
nodes = {
"A": {"color": "Red"},
"B": {"color": "Red"},
"C": {"color": "Red"},
"D": {"color": "Blue"},
"E": {"color": "Blue"},
"F": {"color": "Blue"},
"G": {"color": "Yellow"},
"H": {"color": "Yellow"},
"I": {"color": "Yellow"},
}
edges = [
("A", "D", ["Weak", "Strong"]),
("B", "E", ["Weak", "Strong"]),
("D", "I", ["Strong"]),
("E", "H", ["Strong"]),
("F", "G", ["Weak"]),
("I", "G", ["Weak", "Strong"]),
("I", "H", ["Weak", "Strong"]),
("G", "H", ["Weak", "Strong"]),
]
G = nx.MultiGraph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target, types in edges:
for type in types:
G.add_edge(source, target, type=type)
return G
def build_summary_graph(self):
nodes = {
"Supernode-0": {"color": "Red"},
"Supernode-1": {"color": "Blue"},
"Supernode-2": {"color": "Yellow"},
"Supernode-3": {"color": "Blue"},
"Supernode-4": {"color": "Yellow"},
"Supernode-5": {"color": "Red"},
}
edges = [
("Supernode-1", "Supernode-2", [{"type": "Weak"}]),
("Supernode-2", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]),
("Supernode-3", "Supernode-4", [{"type": "Strong"}]),
("Supernode-3", "Supernode-5", [{"type": "Weak"}, {"type": "Strong"}]),
("Supernode-4", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]),
]
G = nx.MultiGraph()
for node in nodes:
attributes = nodes[node]
G.add_node(node, **attributes)
for source, target, types in edges:
for type in types:
G.add_edge(source, target, type=type)
supernodes = {
"Supernode-0": {"A", "B"},
"Supernode-1": {"C", "D"},
"Supernode-2": {"E", "F"},
"Supernode-3": {"G", "H"},
"Supernode-4": {"I", "J"},
"Supernode-5": {"K", "L"},
}
nx.set_node_attributes(G, supernodes, "group")
return G
| TestSNAPUndirectedMulti |
python | django__django | tests/template_tests/syntax_tests/test_autoescape.py | {
"start": 186,
"end": 6535
} | class ____(SimpleTestCase):
@setup({"autoescape-tag01": "{% autoescape off %}hello{% endautoescape %}"})
def test_autoescape_tag01(self):
output = self.engine.render_to_string("autoescape-tag01")
self.assertEqual(output, "hello")
@setup({"autoescape-tag02": "{% autoescape off %}{{ first }}{% endautoescape %}"})
def test_autoescape_tag02(self):
output = self.engine.render_to_string(
"autoescape-tag02", {"first": "<b>hello</b>"}
)
self.assertEqual(output, "<b>hello</b>")
@setup({"autoescape-tag03": "{% autoescape on %}{{ first }}{% endautoescape %}"})
def test_autoescape_tag03(self):
output = self.engine.render_to_string(
"autoescape-tag03", {"first": "<b>hello</b>"}
)
self.assertEqual(output, "<b>hello</b>")
# Autoescape disabling and enabling nest in a predictable way.
@setup(
{
"autoescape-tag04": (
"{% autoescape off %}{{ first }} {% autoescape on %}{{ first }}"
"{% endautoescape %}{% endautoescape %}"
)
}
)
def test_autoescape_tag04(self):
output = self.engine.render_to_string("autoescape-tag04", {"first": "<a>"})
self.assertEqual(output, "<a> <a>")
@setup({"autoescape-tag05": "{% autoescape on %}{{ first }}{% endautoescape %}"})
def test_autoescape_tag05(self):
output = self.engine.render_to_string(
"autoescape-tag05", {"first": "<b>first</b>"}
)
self.assertEqual(output, "<b>first</b>")
# Strings (ASCII or Unicode) already marked as "safe" are not
# auto-escaped
@setup({"autoescape-tag06": "{{ first }}"})
def test_autoescape_tag06(self):
output = self.engine.render_to_string(
"autoescape-tag06", {"first": mark_safe("<b>first</b>")}
)
self.assertEqual(output, "<b>first</b>")
@setup({"autoescape-tag07": "{% autoescape on %}{{ first }}{% endautoescape %}"})
def test_autoescape_tag07(self):
output = self.engine.render_to_string(
"autoescape-tag07", {"first": mark_safe("<b>Apple</b>")}
)
self.assertEqual(output, "<b>Apple</b>")
@setup(
{
"autoescape-tag08": (
r'{% autoescape on %}{{ var|default_if_none:" endquote\" hah" }}'
r"{% endautoescape %}"
)
}
)
def test_autoescape_tag08(self):
"""
Literal string arguments to filters, if used in the result, are safe.
"""
output = self.engine.render_to_string("autoescape-tag08", {"var": None})
self.assertEqual(output, ' endquote" hah')
# Objects which return safe strings as their __str__ method
# won't get double-escaped.
@setup({"autoescape-tag09": r"{{ unsafe }}"})
def test_autoescape_tag09(self):
output = self.engine.render_to_string(
"autoescape-tag09", {"unsafe": UnsafeClass()}
)
self.assertEqual(output, "you & me")
@setup({"autoescape-tag10": r"{{ safe }}"})
def test_autoescape_tag10(self):
output = self.engine.render_to_string("autoescape-tag10", {"safe": SafeClass()})
self.assertEqual(output, "you > me")
@setup(
{
"autoescape-filtertag01": (
"{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}"
)
}
)
def test_autoescape_filtertag01(self):
"""
The "safe" and "escape" filters cannot work due to internal
implementation details (fortunately, the (no)autoescape block
tags can be used in those cases)
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("autoescape-filtertag01", {"first": "<a>"})
# Arguments to filters are 'safe' and manipulate their input unescaped.
@setup({"autoescape-filters01": '{{ var|cut:"&" }}'})
def test_autoescape_filters01(self):
output = self.engine.render_to_string(
"autoescape-filters01", {"var": "this & that"}
)
self.assertEqual(output, "this that")
@setup({"autoescape-filters02": '{{ var|join:" & " }}'})
def test_autoescape_filters02(self):
output = self.engine.render_to_string(
"autoescape-filters02", {"var": ("Tom", "Dick", "Harry")}
)
self.assertEqual(output, "Tom & Dick & Harry")
@setup({"autoescape-literals01": '{{ "this & that" }}'})
def test_autoescape_literals01(self):
"""
Literal strings are safe.
"""
output = self.engine.render_to_string("autoescape-literals01")
self.assertEqual(output, "this & that")
@setup({"autoescape-stringiterations01": "{% for l in var %}{{ l }},{% endfor %}"})
def test_autoescape_stringiterations01(self):
"""
Iterating over strings outputs safe characters.
"""
output = self.engine.render_to_string(
"autoescape-stringiterations01", {"var": "K&R"}
)
self.assertEqual(output, "K,&,R,")
@setup({"autoescape-lookup01": "{{ var.key }}"})
def test_autoescape_lookup01(self):
"""
Escape requirement survives lookup.
"""
output = self.engine.render_to_string(
"autoescape-lookup01", {"var": {"key": "this & that"}}
)
self.assertEqual(output, "this & that")
@setup(
{
"autoescape-incorrect-arg": (
"{% autoescape true %}{{ var.key }}{% endautoescape %}"
)
}
)
def test_invalid_arg(self):
msg = "'autoescape' argument should be 'on' or 'off'"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string(
"autoescape-incorrect-arg", {"var": {"key": "this & that"}}
)
@setup(
{"autoescape-incorrect-arg": "{% autoescape %}{{ var.key }}{% endautoescape %}"}
)
def test_no_arg(self):
msg = "'autoescape' tag requires exactly one argument."
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.render_to_string(
"autoescape-incorrect-arg", {"var": {"key": "this & that"}}
)
| AutoescapeTagTests |
python | h5py__h5py | h5py/tests/test_big_endian_file.py | {
"start": 1010,
"end": 1469
} | class ____(TestCase):
def test_simple_int_be(self):
name = make_name()
fname = self.mktemp()
arr = np.ndarray(shape=(1,), dtype=">i4", buffer=bytearray([0, 1, 3, 2]))
be_number = 0 * 256 ** 3 + 1 * 256 ** 2 + 3 * 256 ** 1 + 2 * 256 ** 0
with File(fname, mode="w") as f:
f.create_dataset(name, data=arr)
with File(fname, mode="r") as f:
assert f[name][()][0] == be_number
| TestEndianess |
python | tensorflow__tensorflow | tensorflow/python/util/nest_test.py | {
"start": 2755,
"end": 2822
} | class ____(MaskedTensor):
pass
@dataclasses.dataclass
| MaskedTensor2 |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 61830,
"end": 62227
} | class ____(_PrintableStructure):
_fields_ = [
('fieldId', c_uint32),
('scopeId', c_uint32),
('timestamp', c_int64),
('latencyUsec', c_int64),
('valueType', _nvmlValueType_t),
('nvmlReturn', _nvmlReturn_t),
('value', c_nvmlValue_t)
]
NVML_NVLINK_TOTAL_SUPPORTED_BW_MODES = 23
nvmlNvlinkSupportedBwModes_v1 = 0x100001c
| c_nvmlFieldValue_t |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_export.py | {
"start": 31002,
"end": 33662
} | class ____(AdminTestMixin, TestCase):
# Test that Dates, Booleans, numbers etc are retained as native types
# when exporting to XLSX, XLS, ODS (see #1939)
class DeclaredModelFieldBookResource(resources.ModelResource):
# declare a field and enforce export output as str (coerce_to_string)
id = fields.Field(
attribute="id",
widget=widgets.NumberWidget(coerce_to_string=True),
)
imported = fields.Field(
attribute="imported",
widget=widgets.BooleanWidget(coerce_to_string=True),
)
published = fields.Field(
attribute="published",
widget=widgets.DateWidget("%d.%m.%Y", coerce_to_string=True),
)
class Meta:
model = Book
export_order = ("id", "imported", "published")
def test_dynamic_type_export(self):
Book.objects.create(id=101, published=datetime(2010, 8, 2), imported=True)
data = {
"format": "2",
"bookresource_id": True,
"bookresource_imported": True,
"bookresource_published": True,
}
self._prepend_form_prefix(data)
response = self.client.post(self.book_export_url, data)
self.assertEqual(response.status_code, 200)
content = response.content
wb = load_workbook(filename=BytesIO(content))
self.assertEqual(101, wb.active["A2"].value)
self.assertEqual(True, wb.active["B2"].value)
self.assertEqual(datetime(2010, 8, 2), wb.active["C2"].value)
@patch("import_export.mixins.BaseExportMixin.choose_export_resource_class")
def test_dynamic_export_with_custom_resource(
self, mock_choose_export_resource_class
):
# Test that `coerce_to_string` is ignored
mock_choose_export_resource_class.return_value = (
self.DeclaredModelFieldBookResource
)
Book.objects.create(id=101, published=date(2000, 8, 2), imported=True)
data = {
"format": "2",
"bookresource_id": True,
"bookresource_imported": True,
"bookresource_published": True,
}
self._prepend_form_prefix(data)
response = self.client.post(self.book_export_url, data)
self.assertEqual(response.status_code, 200)
content = response.content
wb = load_workbook(filename=BytesIO(content))
self.assertEqual(101, wb.active["A2"].value)
self.assertEqual(1, wb.active["B2"].value)
self.assertEqual(datetime(2000, 8, 2), wb.active["C2"].value)
@override_settings(USE_TZ=True, TIME_ZONE="UTC")
| ExportBinaryFieldsTest |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/regression.py | {
"start": 5694,
"end": 6886
} | class ____(Regression):
"""Performs a non-linear transformation of the data before fitting the model
and doing predictions which allows for doing non-linear regression.
Parameters:
-----------
degree: int
The degree of the polynomial that the independent variable X will be transformed to.
n_iterations: float
The number of training iterations the algorithm will tune the weights for.
learning_rate: float
The step length that will be used when updating the weights.
"""
def __init__(self, degree, n_iterations=3000, learning_rate=0.001):
self.degree = degree
# No regularization
self.regularization = lambda x: 0
self.regularization.grad = lambda x: 0
super(PolynomialRegression, self).__init__(n_iterations=n_iterations,
learning_rate=learning_rate)
def fit(self, X, y):
X = polynomial_features(X, degree=self.degree)
super(PolynomialRegression, self).fit(X, y)
def predict(self, X):
X = polynomial_features(X, degree=self.degree)
return super(PolynomialRegression, self).predict(X)
| PolynomialRegression |
python | kamyu104__LeetCode-Solutions | Python/get-biggest-three-rhombus-sums-in-a-grid.py | {
"start": 64,
"end": 1451
} | class ____(object):
def getBiggestThree(self, grid):
"""
:type grid: List[List[int]]
:rtype: List[int]
"""
K = 3
left = [[grid[i][j] for j in xrange(len(grid[i]))] for i in xrange(len(grid))]
right = [[grid[i][j] for j in xrange(len(grid[i]))] for i in xrange(len(grid))]
for i in xrange(1, len(grid)):
for j in xrange(len(grid[0])-1):
left[i][j] += left[i-1][j+1]
for i in xrange(1, len(grid)):
for j in xrange(1, len(grid[0])):
right[i][j] += right[i-1][j-1]
min_heap = []
lookup = set()
for k in xrange((min(len(grid), len(grid[0]))+1)//2):
for i in xrange(k, len(grid)-k):
for j in xrange(k, len(grid[0])-k):
total = (((left[i][j-k]-left[i-k][j])+(right[i][j+k]-right[i-k][j])+grid[i-k][j]) +
((left[i+k][j]-left[i][j+k])+(right[i+k][j]-right[i][j-k])-grid[i+k][j])) if k else grid[i][j]
if total in lookup:
continue
lookup.add(total)
heapq.heappush(min_heap, total)
if len(min_heap) == K+1:
lookup.remove(heapq.heappop(min_heap))
min_heap.sort(reverse=True)
return min_heap
| Solution |
python | gevent__gevent | examples/webpy.py | {
"start": 335,
"end": 450
} | class ____(object):
def GET(self):
return '<html>Hello, world!<br><a href="/long">/long</a></html>'
| index |
python | getsentry__sentry | tests/sentry/plugins/test_repository_provider.py | {
"start": 241,
"end": 1961
} | class ____(TestCase):
def test_needs_auth_for_user(self) -> None:
user = self.create_user()
provider = DummyRepositoryProvider(id="dummy")
# if no org is provided, user needs auth
assert provider.needs_auth(user) is True
UserSocialAuth.objects.create(provider="dummy", user=user)
assert provider.needs_auth(user) is False
def test_needs_auth_for_organization(self) -> None:
user = self.create_user()
provider = DummyRepositoryProvider(id="dummy")
org = self.create_organization()
integration = self.create_provider_integration(provider="dummy", external_id="123456")
integration.add_organization(org, user)
assert provider.needs_auth(user, organization=org) is False
def test_get_auth_for_user(self) -> None:
user = self.create_user()
provider = DummyRepositoryProvider(id="dummy")
assert provider.get_auth(user) is None
usa = UserSocialAuth.objects.create(provider="dummy", user=user)
auth = provider.get_auth(user)
assert auth
assert auth.id == usa.id
def test_get_auth_for_organization(self) -> None:
user = self.create_user()
user2 = self.create_user()
provider = DummyRepositoryProvider(id="dummy")
usa = UserSocialAuth.objects.create(provider="dummy", user=user2)
org = self.create_organization()
integration = self.create_provider_integration(provider="dummy", external_id="123456")
integration.add_organization(org, user, default_auth_id=usa.id)
auth = provider.get_auth(user, organization=org)
assert auth
assert auth.id == usa.id
| RepositoryProviderTest |
python | walkccc__LeetCode | solutions/3326. Minimum Division Operations to Make Array Non Decreasing/3326.py | {
"start": 0,
"end": 487
} | class ____:
def minOperations(self, nums: list[int]) -> int:
ans = 0
for i in range(len(nums) - 2, -1, -1):
if nums[i] > nums[i + 1]:
minDivisor = self._getMinDivisor(nums[i])
if minDivisor > nums[i + 1]:
return -1
nums[i] = minDivisor
ans += 1
return ans
def _getMinDivisor(self, num: int) -> int:
for divisor in range(2, math.isqrt(num) + 1):
if num % divisor == 0:
return divisor
return num
| Solution |
python | pytorch__pytorch | torch/backends/cuda/__init__.py | {
"start": 1180,
"end": 1711
} | class ____:
# Like regular ContextProp, but uses the `.device_index` attribute from the
# calling object as the first argument to the getter and setter.
def __init__(self, getter, setter):
self.getter = getter
self.setter = setter
def __get__(self, obj, objtype):
return self.getter(obj.device_index)
def __set__(self, obj, val):
if isinstance(self.setter, str):
raise RuntimeError(self.setter)
self.setter(obj.device_index, val)
| cuFFTPlanCacheAttrContextProp |
python | sphinx-doc__sphinx | sphinx/directives/admonitions.py | {
"start": 1298,
"end": 1368
} | class ____(SphinxAdmonition):
node_class = nodes.attention
| Attention |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/google/tests.py | {
"start": 8787,
"end": 13514
} | class ____(GoogleTests):
"""
Run the same set of tests but without having a SocialApp entry.
"""
pass
def test_login_by_token(db, client, settings_with_google_provider):
client.cookies.load({"g_csrf_token": "csrf"})
with patch(
"allauth.socialaccount.internal.jwtkit.jwt.get_unverified_header"
) as g_u_h:
with mocked_response({"dummykid": "-----BEGIN CERTIFICATE-----"}):
with patch(
"allauth.socialaccount.internal.jwtkit.load_pem_x509_certificate"
) as load_pem:
with patch(
"allauth.socialaccount.internal.jwtkit.jwt.decode"
) as decode:
decode.return_value = {
"iss": "https://accounts.google.com",
"aud": "client_id",
"sub": "123sub",
"hd": "example.com",
"email": "raymond@example.com",
"email_verified": True,
"at_hash": "HK6E_P6Dh8Y93mRNtsDB1Q",
"name": "Raymond Penners",
"picture": "https://lh5.googleusercontent.com/photo.jpg",
"given_name": "Raymond",
"family_name": "Penners",
"locale": "en",
"iat": 123,
"exp": 456,
}
g_u_h.return_value = {
"alg": "RS256",
"kid": "dummykid",
"typ": "JWT",
}
pem = Mock()
load_pem.return_value = pem
pem.public_key.return_value = "key"
resp = client.post(
reverse("google_login_by_token"),
{"credential": "dummy", "g_csrf_token": "csrf"},
)
assert resp.status_code == HTTPStatus.FOUND
socialaccount = SocialAccount.objects.get(uid="123sub")
assert socialaccount.user.email == "raymond@example.com"
@pytest.mark.parametrize(
"id_key,verified_key",
[
("id", "email_verified"),
("sub", "verified_email"),
],
)
@pytest.mark.parametrize("verified", [False, True])
def test_extract_data(
id_key, verified_key, verified, settings_with_google_provider, db
):
data = {
"email": "a@b.com",
}
data[id_key] = "123"
data[verified_key] = verified
provider = get_adapter().get_provider(None, GoogleProvider.id)
assert provider.extract_uid(data) == "123"
emails = provider.extract_email_addresses(data)
assert len(emails) == 1
assert emails[0].verified == verified
assert emails[0].email == "a@b.com"
@pytest.mark.parametrize(
"fetch_userinfo,id_token_has_picture,response,expected_uid, expected_picture",
[
(True, True, {"id_token": "123"}, "uid-from-id-token", "pic-from-id-token"),
(True, False, {"id_token": "123"}, "uid-from-id-token", "pic-from-userinfo"),
(True, True, {"access_token": "123"}, "uid-from-userinfo", "pic-from-userinfo"),
],
)
@pytest.mark.parametrize("did_fetch_access_token", [False, True])
def test_complete_login_variants(
response,
settings_with_google_provider,
db,
fetch_userinfo,
expected_uid,
expected_picture,
id_token_has_picture,
did_fetch_access_token,
):
with patch.object(
GoogleOAuth2Adapter,
"_fetch_user_info",
return_value={
"id": "uid-from-userinfo",
"picture": "pic-from-userinfo",
},
):
id_token = {"sub": "uid-from-id-token"}
if id_token_has_picture:
id_token["picture"] = "pic-from-id-token"
with patch(
"allauth.socialaccount.providers.google.views._verify_and_decode",
return_value=id_token,
) as decode_mock:
request = None
app = None
adapter = GoogleOAuth2Adapter(request)
adapter.did_fetch_access_token = did_fetch_access_token
adapter.fetch_userinfo = fetch_userinfo
token = SocialToken()
login = adapter.complete_login(request, app, token, response)
assert login.account.uid == expected_uid
assert login.account.extra_data["picture"] == expected_picture
if not response.get("id_token"):
assert not decode_mock.called
else:
assert decode_mock.call_args[1]["verify_signature"] == (
not did_fetch_access_token
)
| AppInSettingsTests |
python | openai__openai-python | src/openai/types/beta/assistant_update_params.py | {
"start": 6289,
"end": 6612
} | class ____(TypedDict, total=False):
file_ids: SequenceNotStr[str]
"""
Overrides the list of
[file](https://platform.openai.com/docs/api-reference/files) IDs made available
to the `code_interpreter` tool. There can be a maximum of 20 files associated
with the tool.
"""
| ToolResourcesCodeInterpreter |
python | django__django | tests/migrations/migrations_test_apps/lookuperror_c/migrations/0002_c2.py | {
"start": 43,
"end": 690
} | class ____(migrations.Migration):
dependencies = [
("lookuperror_a", "0002_a2"),
("lookuperror_c", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="C2",
fields=[
(
"id",
models.AutoField(
auto_created=True,
verbose_name="ID",
primary_key=True,
serialize=False,
),
),
("a1", models.ForeignKey("lookuperror_a.A1", models.CASCADE)),
],
),
]
| Migration |
python | realpython__materials | python-mixins/mixins.py | {
"start": 255,
"end": 468
} | class ____:
@classmethod
def from_json(cls, json_string: str) -> Self:
return cls(**json.loads(json_string))
def as_json(self) -> str:
return json.dumps(vars(self))
| JSONSerializableMixin |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-ai21/llama_index/llms/ai21/base.py | {
"start": 1708,
"end": 15490
} | class ____(FunctionCallingLLM):
"""
AI21 Labs LLM.
Examples:
`pip install llama-index-llms-ai21`
```python
from llama_index.llms.ai21 import AI21
llm = AI21(model="jamba-instruct", api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(
description="The AI21 model to use.", default=_DEFAULT_AI21_MODEL
)
max_tokens: int = Field(
description="The maximum number of tokens to generate.",
default=_DEFAULT_MAX_TOKENS,
gt=0,
)
temperature: float = Field(
description="The temperature to use for sampling.",
default=_DEFAULT_TEMPERATURE,
ge=0.0,
le=1.0,
)
base_url: Optional[str] = Field(default=None, description="The base URL to use.")
timeout: Optional[float] = Field(
default=None, description="The timeout to use in seconds.", ge=0
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", ge=0
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the anthropic API."
)
_client: Any = PrivateAttr()
_async_client: Any = PrivateAttr()
def __init__(
self,
model: str = _DEFAULT_AI21_MODEL,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
max_tokens: Optional[int] = _DEFAULT_MAX_TOKENS,
max_retries: int = 10,
default_headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = _DEFAULT_TEMPERATURE,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""Initialize params."""
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
super().__init__(
model=model,
max_tokens=max_tokens,
temperature=temperature,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._client = AI21Client(
api_key=api_key,
api_host=base_url,
timeout_sec=timeout,
num_retries=max_retries,
headers=default_headers,
via="llama-index",
)
self._async_client = AsyncAI21Client(
api_key=api_key,
api_host=base_url,
timeout_sec=timeout,
num_retries=max_retries,
headers=default_headers,
via="llama-index",
)
@classmethod
def class_name(cls) -> str:
"""Get Class Name."""
return "AI21_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=ai21_model_to_context_size(self.model),
num_output=self.max_tokens,
model_name=self.model,
is_function_calling_model=is_function_calling_model(
model=self.model,
),
is_chat_model=True,
)
@property
def tokenizer(self) -> BaseTokenizer:
return Tokenizer.get_tokenizer(_TOKENIZER_MAP.get(self.model))
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
return {**base_kwargs, **self.additional_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if self._is_j2_model():
return self._j2_completion(prompt, formatted, **all_kwargs)
completion_fn = chat_to_completion_decorator(self.chat)
return completion_fn(prompt, **all_kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self._is_j2_model():
raise ValueError("Stream completion is not supported for J2 models.")
all_kwargs = self._get_all_kwargs(**kwargs)
completion_fn = stream_chat_to_completion_decorator(self.stream_chat)
return completion_fn(prompt, **all_kwargs)
def _prepare_chat_with_tools(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False, # ai21 does not support configuring the tool_choice
**kwargs: Any,
) -> Dict[str, Any]:
tool_specs = [tool.metadata.to_openai_tool() for tool in tools]
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
return {
"messages": messages,
"tools": tool_specs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if self._is_j2_model():
return self._j2_chat(messages, **all_kwargs)
messages = [message_to_ai21_message(message) for message in messages]
response = self._client.chat.completions.create(
messages=messages,
stream=False,
**all_kwargs,
)
message = from_ai21_message_to_chat_message(response.choices[0].message)
return ChatResponse(
message=message,
raw=response.to_dict(),
)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if self._is_j2_model():
return await self._j2_async_chat(messages, **all_kwargs)
messages = [message_to_ai21_message(message) for message in messages]
response = await self._async_client.chat.completions.create(
messages=messages,
stream=False,
**all_kwargs,
)
message = from_ai21_message_to_chat_message(response.choices[0].message)
return ChatResponse(
message=message,
raw=response.to_dict(),
)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
if self._is_j2_model():
raise ValueError("Async Stream chat is not supported for J2 models.")
all_kwargs = self._get_all_kwargs(**kwargs)
messages = [message_to_ai21_message(message) for message in messages]
response = await self._async_client.chat.completions.create(
messages=messages,
stream=True,
**all_kwargs,
)
async def gen() -> ChatResponseAsyncGen:
content = ""
role = MessageRole.ASSISTANT
async for r in response:
if isinstance(r, ChatCompletionChunk):
content_delta = r.choices[0].delta.content
if content_delta is None:
content += ""
else:
content += r.choices[0].delta.content
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=r.to_dict(),
)
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if self._is_j2_model():
return await self._j2_async_complete(prompt, formatted, **all_kwargs)
acomplete_fn = achat_to_completion_decorator(self.achat)
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
return await astream_complete_fn(prompt, **kwargs)
def _j2_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
system, messages = message_to_ai21_j2_message(messages)
response = self._client.chat.create(
system=system,
messages=messages,
stream=False,
**kwargs,
)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=response.outputs[0].text,
),
raw=response.to_dict(),
)
async def _j2_async_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
system, messages = message_to_ai21_j2_message(messages)
response = await self._async_client.chat.create(
system=system,
messages=messages,
stream=False,
**kwargs,
)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content=response.outputs[0].text,
),
raw=response.to_dict(),
)
async def _j2_async_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response = await self._async_client.completion.create(
prompt=prompt,
stream=False,
**kwargs,
)
return CompletionResponse(
text=response.completions[0].data.text,
raw=response.to_dict(),
)
def _j2_completion(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response = self._client.completion.create(
prompt=prompt,
stream=False,
**kwargs,
)
return CompletionResponse(
text=response.completions[0].data.text,
raw=response.to_dict(),
)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._is_j2_model():
raise ValueError("Stream chat is not supported for J2 models.")
all_kwargs = self._get_all_kwargs(**kwargs)
messages = [message_to_ai21_message(message) for message in messages]
response = self._client.chat.completions.create(
messages=messages,
stream=True,
**all_kwargs,
)
def gen() -> ChatResponseGen:
content = ""
role = MessageRole.ASSISTANT
for r in response:
if isinstance(r, ChatCompletionChunk):
content_delta = r.choices[0].delta.content
if content_delta is None:
content += ""
else:
content += r.choices[0].delta.content
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=r.to_dict(),
)
return gen()
def _is_j2_model(self) -> bool:
return "j2" in self.model
def _parse_tool(self, tool_call: ToolCall) -> ToolSelection:
if not isinstance(tool_call, ToolCall):
raise ValueError("Invalid tool_call object")
if tool_call.type != "function":
raise ValueError(f"Unsupported tool call type: {tool_call.type}")
try:
argument_dict = parse_partial_json(tool_call.function.arguments)
except ValueError:
argument_dict = {}
return ToolSelection(
tool_id=tool_call.id,
tool_name=tool_call.function.name,
tool_kwargs=argument_dict,
)
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
return [self._parse_tool(tool_call) for tool_call in tool_calls]
| AI21 |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-ibm/llama_index/llms/ibm/base.py | {
"start": 1604,
"end": 23701
} | class ____(FunctionCallingLLM):
"""
IBM watsonx.ai large language models.
Example:
`pip install llama-index-llms-ibm`
```python
from llama_index.llms.ibm import WatsonxLLM
watsonx_llm = WatsonxLLM(
model_id="google/flan-ul2",
url="https://us-south.ml.cloud.ibm.com",
apikey="*****",
project_id="*****",
)
```
"""
model_id: Optional[str] = Field(
default=None, description="Type of model to use.", frozen=True
)
deployment_id: Optional[str] = Field(
default=None, description="Id of deployed model to use.", frozen=True
)
temperature: Optional[float] = Field(
default=None,
description="The temperature to use for sampling.",
)
max_new_tokens: Optional[int] = Field(
default=None,
description="The maximum number of tokens to generate.",
)
additional_params: Optional[Dict[str, Any]] = Field(
default_factory=None,
description="Additional generation params for the watsonx.ai models.",
)
project_id: Optional[str] = Field(
default=None,
description="ID of the Watson Studio project.",
frozen=True,
)
space_id: Optional[str] = Field(
default=None, description="ID of the Watson Studio space.", frozen=True
)
url: Optional[SecretStr] = Field(
default=None,
description="Url to the IBM watsonx.ai for IBM Cloud or the IBM watsonx.ai software instance.",
frozen=True,
)
apikey: Optional[SecretStr] = Field(
default=None,
description="API key to the IBM watsonx.ai for IBM Cloud or the IBM watsonx.ai software instance.",
frozen=True,
)
token: Optional[SecretStr] = Field(
default=None,
description="Token to the IBM watsonx.ai software instance.",
frozen=True,
)
password: Optional[SecretStr] = Field(
default=None,
description="Password to the IBM watsonx.ai software instance.",
frozen=True,
)
username: Optional[SecretStr] = Field(
default=None,
description="Username to the IBM watsonx.ai software instance.",
frozen=True,
)
instance_id: Optional[SecretStr] = Field(
default=None,
description="Instance_id of the IBM watsonx.ai software instance.",
frozen=True,
deprecated="The `instance_id` parameter is deprecated and will no longer be utilized for logging to the IBM watsonx.ai software instance.",
)
version: Optional[SecretStr] = Field(
default=None,
description="Version of the IBM watsonx.ai software instance.",
frozen=True,
)
verify: Union[str, bool, None] = Field(
default=None,
description="""
User can pass as verify one of following:
the path to a CA_BUNDLE file
the path of directory with certificates of trusted CAs
True - default path to truststore will be taken
False - no verification will be made
""",
frozen=True,
)
validate_model: bool = Field(
default=True, description="Model id validation", frozen=True
)
# Enabled by default since IBM watsonx SDK 1.1.2 but it can cause problems
# in environments where long-running connections are not supported.
persistent_connection: bool = Field(
default=True, description="Use persistent connection"
)
_model: ModelInference = PrivateAttr()
_client: Optional[APIClient] = PrivateAttr()
_model_info: Optional[Dict[str, Any]] = PrivateAttr()
_deployment_info: Optional[Dict[str, Any]] = PrivateAttr()
_context_window: Optional[int] = PrivateAttr()
_text_generation_params: Dict[str, Any] | None = PrivateAttr()
def __init__(
self,
model_id: Optional[str] = None,
deployment_id: Optional[str] = None,
temperature: Optional[float] = None,
max_new_tokens: Optional[int] = None,
additional_params: Optional[Dict[str, Any]] = None,
project_id: Optional[str] = None,
space_id: Optional[str] = None,
url: Optional[str] = None,
apikey: Optional[str] = None,
token: Optional[str] = None,
password: Optional[str] = None,
username: Optional[str] = None,
version: Optional[str] = None,
verify: Union[str, bool, None] = None,
api_client: Optional[APIClient] = None,
validate_model: bool = True,
persistent_connection: bool = True,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""
Initialize LLM and watsonx.ai ModelInference.
"""
callback_manager = callback_manager or CallbackManager([])
additional_params = additional_params or {}
creds = (
resolve_watsonx_credentials(
url=url,
apikey=apikey,
token=token,
username=username,
password=password,
)
if not isinstance(api_client, APIClient)
else {}
)
super().__init__(
model_id=model_id,
deployment_id=deployment_id,
temperature=temperature,
max_new_tokens=max_new_tokens,
additional_params=additional_params,
project_id=project_id,
space_id=space_id,
url=creds.get("url"),
apikey=creds.get("apikey"),
token=creds.get("token"),
password=creds.get("password"),
username=creds.get("username"),
version=version,
verify=verify,
_client=api_client,
validate_model=validate_model,
persistent_connection=persistent_connection,
callback_manager=callback_manager,
**kwargs,
)
self._context_window = kwargs.get("context_window")
generation_params = {}
if self.temperature is not None:
generation_params["temperature"] = self.temperature
if self.max_new_tokens is not None:
generation_params["max_new_tokens"] = self.max_new_tokens
generation_params = {**generation_params, **additional_params}
if generation_params:
self._text_generation_params, _ = self._split_generation_params(
generation_params
)
else:
self._text_generation_params = None
self._client = api_client
self._model = ModelInference(
model_id=model_id,
deployment_id=deployment_id,
credentials=(
Credentials.from_dict(
{
key: value.get_secret_value() if value else None
for key, value in self._get_credential_kwargs().items()
},
_verify=self.verify,
)
if creds
else None
),
params=self._text_generation_params,
project_id=self.project_id,
space_id=self.space_id,
api_client=api_client,
validate=validate_model,
persistent_connection=persistent_connection,
)
self._model_info = None
self._deployment_info = None
model_config = ConfigDict(protected_namespaces=(), validate_assignment=True)
@property
def model_info(self):
if self._model.model_id and self._model_info is None:
self._model_info = self._model.get_details()
return self._model_info
@property
def deployment_info(self):
if self._model.deployment_id and self._deployment_info is None:
self._deployment_info = self._model.get_details()
return self._deployment_info
@classmethod
def class_name(cls) -> str:
"""Get Class Name."""
return "WatsonxLLM"
def _get_credential_kwargs(self) -> Dict[str, SecretStr | None]:
return {
"url": self.url,
"apikey": self.apikey,
"token": self.token,
"password": self.password,
"username": self.username,
"version": self.version,
}
@property
def metadata(self) -> LLMMetadata:
if self.model_id and self._context_window is None:
model_id = self.model_id
self._context_window = self.model_info.get("model_limits", {}).get(
"max_sequence_length"
)
elif self._context_window is None:
model_id = self.deployment_info.get("entity", {}).get("base_model_id")
self._context_window = (
self._model._client.foundation_models.get_model_specs(model_id=model_id)
.get("model_limits", {})
.get("max_sequence_length")
)
return LLMMetadata(
context_window=self._context_window or DEFAULT_CONTEXT_WINDOW,
num_output=self.max_new_tokens or DEFAULT_MAX_TOKENS,
model_name=self.model_id
or self.deployment_info.get("entity", {}).get(
"base_model_id", self._model.deployment_id
),
)
@property
def sample_generation_text_params(self) -> Dict[str, Any]:
"""Example of Model generation text kwargs that a user can pass to the model."""
return GenTextParamsMetaNames().get_example_values()
@property
def sample_chat_generation_params(self) -> Dict[str, Any]:
"""Example of Model chat generation kwargs that a user can pass to the model."""
return GenChatParamsMetaNames().get_example_values()
def _split_generation_params(
self, data: Dict[str, Any]
) -> Tuple[Dict[str, Any] | None, Dict[str, Any]]:
params = {}
kwargs = {}
sample_generation_kwargs_keys = set(self.sample_generation_text_params.keys())
sample_generation_kwargs_keys.add("prompt_variables")
for key, value in data.items():
if key in sample_generation_kwargs_keys:
params.update({key: value})
else:
kwargs.update({key: value})
return params if params else None, kwargs
def _split_chat_generation_params(
self, data: Dict[str, Any]
) -> Tuple[Dict[str, Any] | None, Dict[str, Any]]:
params = {}
kwargs = {}
sample_generation_kwargs_keys = set(self.sample_chat_generation_params.keys())
for key, value in data.items():
if key in sample_generation_kwargs_keys:
params.update({key: value})
else:
kwargs.update({key: value})
return params if params else None, kwargs
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
params, generation_kwargs = self._split_generation_params(kwargs)
if "use_completions" in generation_kwargs:
del generation_kwargs["use_completions"]
response = self._model.generate(
prompt=prompt,
params=self._text_generation_params or params,
**generation_kwargs,
)
return CompletionResponse(
text=self._model._return_guardrails_stats(response).get("generated_text"),
raw=response,
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
params, generation_kwargs = self._split_generation_params(kwargs)
if "use_completions" in generation_kwargs:
del generation_kwargs["use_completions"]
response = await self._model.agenerate(
prompt=prompt,
params=self._text_generation_params or params,
**generation_kwargs,
)
return CompletionResponse(
text=self._model._return_guardrails_stats(response).get("generated_text"),
raw=response,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
params, generation_kwargs = self._split_generation_params(kwargs)
stream_response = self._model.generate_text_stream(
prompt=prompt,
params=self._text_generation_params or params,
**generation_kwargs,
)
def gen() -> CompletionResponseGen:
content = ""
if kwargs.get("raw_response"):
for stream_delta in stream_response:
stream_delta_text = self._model._return_guardrails_stats(
stream_delta
).get("generated_text", "")
content += stream_delta_text
yield CompletionResponse(
text=content, delta=stream_delta_text, raw=stream_delta
)
else:
for stream_delta in stream_response:
content += stream_delta
yield CompletionResponse(text=content, delta=stream_delta)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_dicts = [to_watsonx_message_dict(message) for message in messages]
params, generation_kwargs = self._split_chat_generation_params(kwargs)
response = self._model.chat(
messages=message_dicts,
params=params,
tools=generation_kwargs.get("tools"),
tool_choice=generation_kwargs.get("tool_choice"),
tool_choice_option=generation_kwargs.get("tool_choice_option"),
)
wx_message = response["choices"][0]["message"]
message = from_watsonx_message(wx_message)
return ChatResponse(
message=message,
raw=response,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if kwargs.get("use_completions"):
chat_fn = completion_to_chat_decorator(self.complete)
else:
chat_fn = self._chat
return chat_fn(messages, **kwargs)
async def _achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
message_dicts = [to_watsonx_message_dict(message) for message in messages]
params, generation_kwargs = self._split_chat_generation_params(kwargs)
response = await self._model.achat(
messages=message_dicts,
params=params,
tools=generation_kwargs.get("tools"),
tool_choice=generation_kwargs.get("tool_choice"),
tool_choice_option=generation_kwargs.get("tool_choice_option"),
)
wx_message = response["choices"][0]["message"]
message = from_watsonx_message(wx_message)
return ChatResponse(
message=message,
raw=response,
)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
if kwargs.get("use_completions"):
achat_fn = acompletion_to_chat_decorator(self.acomplete)
else:
achat_fn = self._achat
return await achat_fn(messages, **kwargs)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
message_dicts = [to_watsonx_message_dict(message) for message in messages]
params, generation_kwargs = self._split_chat_generation_params(kwargs)
stream_response = self._model.chat_stream(
messages=message_dicts,
params=params,
tools=generation_kwargs.get("tools"),
tool_choice=generation_kwargs.get("tool_choice"),
tool_choice_option=generation_kwargs.get("tool_choice_option"),
)
def stream_gen() -> ChatResponseGen:
content = ""
role = None
tool_calls = []
for response in stream_response:
tools_available = False
delta = ""
additional_kwargs = {}
if response["choices"]:
wx_message = response["choices"][0]["delta"]
role = wx_message.get("role") or role or MessageRole.ASSISTANT
delta = wx_message.get("content", "")
content += delta
if "tool_calls" in wx_message:
tools_available = True
if tools_available:
tool_calls = update_tool_calls(
tool_calls, wx_message["tool_calls"]
)
if tool_calls:
additional_kwargs["tool_calls"] = tool_calls
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return stream_gen()
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if kwargs.get("use_completions"):
chat_stream_fn = stream_completion_to_chat_decorator(self.stream_complete)
else:
chat_stream_fn = self._stream_chat
return chat_stream_fn(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
def _prepare_chat_with_tools(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
tool_choice: Optional[str] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Predict and call the tool."""
# watsonx uses the same openai tool format
tool_specs = [tool.metadata.to_openai_tool() for tool in tools]
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
chat_with_tools_payload = {
"messages": messages,
"tools": tool_specs or None,
**kwargs,
}
if tool_required and tool_choice is None:
# NOTE: watsonx can only require a single tool
tool_choice = tools[0].metadata.name if len(tools) > 0 else None
if tool_choice is not None:
chat_with_tools_payload.update(
{"tool_choice": {"type": "function", "function": {"name": tool_choice}}}
)
return chat_with_tools_payload
def get_tool_calls_from_response(
self,
response: ChatResponse,
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
if not isinstance(tool_call, dict):
raise ValueError("Invalid tool_call object")
if tool_call.get("type") != "function":
raise ValueError("Invalid tool type. Unsupported by watsonx.ai")
# this should handle both complete and partial jsons
try:
argument_dict = parse_partial_json(
tool_call.get("function", {}).get("arguments")
)
except ValueError:
argument_dict = {}
tool_selections.append(
ToolSelection(
tool_id=tool_call.get("id"),
tool_name=tool_call.get("function").get("name"),
tool_kwargs=argument_dict,
)
)
return tool_selections
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if isinstance(raw_response, dict):
usage = raw_response.get("usage", {})
if not usage:
return {}
prompt_tokens = usage.get("prompt_tokens", 0)
completion_tokens = usage.get("completion_tokens", 0)
total_tokens = usage.get("total_tokens", 0)
else:
return {}
return {
"prompt_tokens": prompt_tokens,
"completion_tokens": completion_tokens,
"total_tokens": total_tokens,
}
| WatsonxLLM |
python | pandas-dev__pandas | pandas/tests/frame/test_unary.py | {
"start": 114,
"end": 5744
} | class ____:
# __pos__, __neg__, __invert__
@pytest.mark.parametrize(
"df_data,expected_data",
[
([-1, 1], [1, -1]),
([False, True], [True, False]),
(pd.to_timedelta([-1, 1]), pd.to_timedelta([1, -1])),
],
)
def test_neg_numeric(self, df_data, expected_data):
df = pd.DataFrame({"a": df_data})
expected = pd.DataFrame({"a": expected_data})
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df, expected",
[
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
],
)
def test_neg_object(self, df, expected):
# GH#21380
df = pd.DataFrame({"a": df})
expected = pd.DataFrame({"a": expected})
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df_data",
[
["a", "b"],
pd.to_datetime(["2017-01-22", "1970-01-01"]),
],
)
def test_neg_raises(self, df_data, using_infer_string):
df = pd.DataFrame({"a": df_data})
msg = (
"bad operand type for unary -: 'str'|"
r"bad operand type for unary -: 'DatetimeArray'|"
"unary '-' not supported for dtype"
)
with pytest.raises(TypeError, match=msg):
(-df)
with pytest.raises(TypeError, match=msg):
(-df["a"])
def test_invert(self, float_frame):
df = float_frame
tm.assert_frame_equal(-(df < 0), ~(df < 0))
def test_invert_mixed(self):
shape = (10, 5)
df = pd.concat(
[
pd.DataFrame(np.zeros(shape, dtype="bool")),
pd.DataFrame(np.zeros(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
result = ~df
expected = pd.concat(
[
pd.DataFrame(np.ones(shape, dtype="bool")),
pd.DataFrame(-np.ones(shape, dtype=int)),
],
axis=1,
ignore_index=True,
)
tm.assert_frame_equal(result, expected)
def test_invert_empty_not_input(self):
# GH#51032
df = pd.DataFrame()
result = ~df
tm.assert_frame_equal(df, result)
assert df is not result
@pytest.mark.parametrize(
"df_data",
[
[-1, 1],
[False, True],
pd.to_timedelta([-1, 1]),
],
)
def test_pos_numeric(self, df_data):
# GH#16073
df = pd.DataFrame({"a": df_data})
tm.assert_frame_equal(+df, df)
tm.assert_series_equal(+df["a"], df["a"])
@pytest.mark.parametrize(
"df_data",
[
np.array([-1, 2], dtype=object),
[Decimal("-1.0"), Decimal("2.0")],
],
)
def test_pos_object(self, df_data):
# GH#21380
df = pd.DataFrame({"a": df_data})
tm.assert_frame_equal(+df, df)
tm.assert_series_equal(+df["a"], df["a"])
@pytest.mark.filterwarnings("ignore:Applying:DeprecationWarning")
def test_pos_object_raises(self):
# GH#21380
df = pd.DataFrame({"a": ["a", "b"]})
with pytest.raises(
TypeError, match=r"^bad operand type for unary \+: \'str\'$"
):
tm.assert_frame_equal(+df, df)
def test_pos_raises(self):
df = pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})
msg = r"bad operand type for unary \+: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
(+df)
with pytest.raises(TypeError, match=msg):
(+df["a"])
def test_unary_nullable(self):
df = pd.DataFrame(
{
"a": pd.array([1, -2, 3, pd.NA], dtype="Int64"),
"b": pd.array([4.0, -5.0, 6.0, pd.NA], dtype="Float32"),
"c": pd.array([True, False, False, pd.NA], dtype="boolean"),
# include numpy bool to make sure bool-vs-boolean behavior
# is consistent in non-NA locations
"d": np.array([True, False, False, True]),
}
)
result = +df
res_ufunc = np.positive(df)
expected = df
# TODO: assert that we have copies?
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(res_ufunc, expected)
result = -df
res_ufunc = np.negative(df)
expected = pd.DataFrame(
{
"a": pd.array([-1, 2, -3, pd.NA], dtype="Int64"),
"b": pd.array([-4.0, 5.0, -6.0, pd.NA], dtype="Float32"),
"c": pd.array([False, True, True, pd.NA], dtype="boolean"),
"d": np.array([False, True, True, False]),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(res_ufunc, expected)
result = abs(df)
res_ufunc = np.abs(df)
expected = pd.DataFrame(
{
"a": pd.array([1, 2, 3, pd.NA], dtype="Int64"),
"b": pd.array([4.0, 5.0, 6.0, pd.NA], dtype="Float32"),
"c": pd.array([True, False, False, pd.NA], dtype="boolean"),
"d": np.array([True, False, False, True]),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(res_ufunc, expected)
| TestDataFrameUnaryOperators |
python | tensorflow__tensorflow | tensorflow/python/ops/gradient_checker_v2_test.py | {
"start": 9532,
"end": 12964
} | class ____(test.TestCase):
# Gradient checker for MNIST.
def _BuildAndTestMiniMNIST(self, param_index, tag):
# Fix seed to avoid occasional flakiness
np.random.seed(6)
# Hyperparameters
batch = 3
inputs = 16
features = 32
classes = 10
# Define the parameters
inp_data = np.random.random_sample(inputs * batch)
hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
hidden_bias_data = np.random.random_sample(features)
sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
sm_bias_data = np.random.random_sample(classes)
# special care for labels since they need to be normalized per batch
label_data = np.random.random(batch * classes).reshape((batch, classes))
s = label_data.sum(axis=1)
label_data /= s[:, None]
# We treat the inputs as "parameters" here
inp = constant_op.constant(
inp_data.tolist(),
shape=[batch, inputs],
dtype=dtypes.float64,
name="inp")
hidden_weight = constant_op.constant(
hidden_weight_data.tolist(),
shape=[inputs, features],
dtype=dtypes.float64,
name="hidden_weight")
hidden_bias = constant_op.constant(
hidden_bias_data.tolist(),
shape=[features],
dtype=dtypes.float64,
name="hidden_bias")
softmax_weight = constant_op.constant(
sm_weight_data.tolist(),
shape=[features, classes],
dtype=dtypes.float64,
name="softmax_weight")
softmax_bias = constant_op.constant(
sm_bias_data.tolist(),
shape=[classes],
dtype=dtypes.float64,
name="softmax_bias")
# List all the parameter so that we can test them one at a time
all_params = [inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias]
# Now, Building MNIST
def f(inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias):
features = nn_ops.relu(
nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
logits = nn_ops.xw_plus_b(
features, softmax_weight, softmax_bias, name="logits")
labels = constant_op.constant(
label_data.tolist(),
shape=[batch, classes],
dtype=dtypes.float64,
name="labels")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="cost")
return cost
def f_restricted(x):
xs = all_params
i = param_index
# use x for the i-th parameter
xs = xs[0:i] + [x] + xs[i + 1:]
return f(*xs)
# Test the gradients.
err = gradient_checker.max_error(*gradient_checker.compute_gradient(
f_restricted, [all_params[param_index]], delta=1e-5))
tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
return err
def testInputGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)
def testHiddenWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
def testHiddenBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
def testSoftmaxWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
def testSoftmaxBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
| MiniMNISTTest |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 4226,
"end": 5726
} | class ____(ModelOutput):
r"""
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the Deformable DETR encoder-decoder model.
"""
)
| TestDetrDecoderOutput |
python | OmkarPathak__pygorithm | tests/test_math.py | {
"start": 156,
"end": 372
} | class ____(unittest.TestCase):
def test_lcm(self):
self.assertEqual(lcm.lcm([3, 12, 16]), 48)
def test_lcm_using_gcd(self):
self.assertEqual(lcm_using_gcd.lcm_using_gcd([3, 12, 16]), 48)
| TestLCM |
python | apache__thrift | lib/py/src/protocol/TJSONProtocol.py | {
"start": 1951,
"end": 2301
} | class ____(object):
def __init__(self, protocol):
self.protocol = protocol
self.first = True
def doIO(self, function):
pass
def write(self):
pass
def read(self):
pass
def escapeNum(self):
return False
def __str__(self):
return self.__class__.__name__
| JSONBaseContext |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 51889,
"end": 54211
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: SeamlessM4TConfig, encoder_ffn_dim=None, encoder_attention_heads=None):
super().__init__()
encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim
encoder_attention_heads = (
config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads
)
self.embed_dim = config.hidden_size
self.self_attn = SeamlessM4TAttention(
embed_dim=self.embed_dim,
num_heads=encoder_attention_heads,
dropout=config.attention_dropout,
)
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.ffn = SeamlessM4TFeedForwardNetwork(config, ffn_dim=encoder_ffn_dim)
self.ffn_layer_norm = nn.LayerNorm(config.hidden_size)
self.ffn_dropout = nn.Dropout(config.activation_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`):
input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`):
attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
large negative values.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ffn_layer_norm(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = self.ffn_dropout(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| SeamlessM4TEncoderLayer |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 198245,
"end": 198363
} | class ____(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
| SmallBufferedFileObjectClassTestCase |
python | pypa__pip | src/pip/_vendor/packaging/markers.py | {
"start": 1058,
"end": 1190
} | class ____(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
| UndefinedComparison |
python | yandexdataschool__Practical_RL | week07_seq2seq/basic_model_torch.py | {
"start": 313,
"end": 7322
} | class ____(nn.Module):
def __init__(self, inp_voc, out_voc,
emb_size, hid_size,):
super(self.__class__, self).__init__()
self.inp_voc = inp_voc
self.out_voc = out_voc
self.emb_inp = nn.Embedding(len(inp_voc), emb_size)
self.emb_out = nn.Embedding(len(out_voc), emb_size)
self.enc0 = nn.GRU(emb_size, hid_size, batch_first=True)
self.dec_start = nn.Linear(hid_size, hid_size)
self.dec0 = nn.GRUCell(emb_size, hid_size)
self.logits = nn.Linear(hid_size, len(out_voc))
def encode(self, inp, **flags):
"""
Takes symbolic input sequence, computes initial state
:param inp: input tokens, int64 vector of shape [batch]
:return: a list of initial decoder state tensors
"""
inp_emb = self.emb_inp(inp)
enc_seq, _ = self.enc0(inp_emb)
# select last element w.r.t. mask
end_index = infer_length(inp, self.inp_voc.eos_ix)
end_index[end_index >= inp.shape[1]] = inp.shape[1] - 1
enc_last = enc_seq[range(0, enc_seq.shape[0]), end_index.detach(), :]
dec_start = self.dec_start(enc_last)
return [dec_start]
def decode(self, prev_state, prev_tokens, **flags):
"""
Takes previous decoder state and tokens, returns new state and logits
:param prev_state: a list of previous decoder state tensors
:param prev_tokens: previous output tokens, an int vector of [batch_size]
:return: a list of next decoder state tensors, a tensor of logits [batch,n_tokens]
"""
[prev_dec] = prev_state
prev_emb = self.emb_out(prev_tokens)
new_dec_state = self.dec0(prev_emb, prev_dec)
output_logits = self.logits(new_dec_state)
return [new_dec_state], output_logits
def forward(self, inp, out, eps=1e-30, **flags):
"""
Takes symbolic int32 matrices of hebrew words and their english translations.
Computes the log-probabilities of all possible english characters given english prefices and hebrew word.
:param inp: input sequence, int32 matrix of shape [batch,time]
:param out: output sequence, int32 matrix of shape [batch,time]
:return: log-probabilities of all possible english characters of shape [bath,time,n_tokens]
Note: log-probabilities time axis is synchronized with out
In other words, logp are probabilities of __current__ output at each tick, not the next one
therefore you can get likelihood as logprobas * tf.one_hot(out,n_tokens)
"""
device = next(self.parameters()).device
batch_size = inp.shape[0]
bos = torch.tensor(
[self.out_voc.bos_ix] * batch_size,
dtype=torch.long,
device=device,
)
logits_seq = [torch.log(to_one_hot(bos, len(self.out_voc)) + eps)]
hid_state = self.encode(inp, **flags)
for x_t in out.transpose(0, 1)[:-1]:
hid_state, logits = self.decode(hid_state, x_t, **flags)
logits_seq.append(logits)
return F.log_softmax(torch.stack(logits_seq, dim=1), dim=-1)
def translate(self, inp, greedy=False, max_len=None, eps=1e-30, **flags):
"""
takes symbolic int32 matrix of hebrew words, produces output tokens sampled
from the model and output log-probabilities for all possible tokens at each tick.
:param inp: input sequence, int32 matrix of shape [batch,time]
:param greedy: if greedy, takes token with highest probablity at each tick.
Otherwise samples proportionally to probability.
:param max_len: max length of output, defaults to 2 * input length
:return: output tokens int32[batch,time] and
log-probabilities of all tokens at each tick, [batch,time,n_tokens]
"""
device = next(self.parameters()).device
batch_size = inp.shape[0]
bos = torch.tensor(
[self.out_voc.bos_ix] * batch_size,
dtype=torch.long,
device=device,
)
mask = torch.ones(batch_size, dtype=torch.uint8, device=device)
logits_seq = [torch.log(to_one_hot(bos, len(self.out_voc)) + eps)]
out_seq = [bos]
hid_state = self.encode(inp, **flags)
while True:
hid_state, logits = self.decode(hid_state, out_seq[-1], **flags)
if greedy:
_, y_t = torch.max(logits, dim=-1)
else:
probs = F.softmax(logits, dim=-1)
y_t = torch.multinomial(probs, 1)[:, 0]
logits_seq.append(logits)
out_seq.append(y_t)
mask &= y_t != self.out_voc.eos_ix
if not mask.any():
break
if max_len and len(out_seq) >= max_len:
break
return (
torch.stack(out_seq, 1),
F.log_softmax(torch.stack(logits_seq, 1), dim=-1),
)
### Utility functions ###
def infer_mask(
seq,
eos_ix,
batch_first=True,
include_eos=True,
dtype=torch.float):
"""
compute mask given output indices and eos code
:param seq: tf matrix [time,batch] if batch_first else [batch,time]
:param eos_ix: integer index of end-of-sentence token
:param include_eos: if True, the time-step where eos first occurs is has mask = 1
:returns: mask, float32 matrix with '0's and '1's of same shape as seq
"""
assert seq.dim() == 2
is_eos = (seq == eos_ix).to(dtype=torch.float)
if include_eos:
if batch_first:
is_eos = torch.cat((is_eos[:, :1] * 0, is_eos[:, :-1]), dim=1)
else:
is_eos = torch.cat((is_eos[:1, :] * 0, is_eos[:-1, :]), dim=0)
count_eos = torch.cumsum(is_eos, dim=1 if batch_first else 0)
mask = count_eos == 0
return mask.to(dtype=dtype)
def infer_length(
seq,
eos_ix,
batch_first=True,
include_eos=True,
dtype=torch.long):
"""
compute length given output indices and eos code
:param seq: tf matrix [time,batch] if time_major else [batch,time]
:param eos_ix: integer index of end-of-sentence token
:param include_eos: if True, the time-step where eos first occurs is has mask = 1
:returns: lengths, int32 vector of shape [batch]
"""
mask = infer_mask(seq, eos_ix, batch_first, include_eos, dtype)
return torch.sum(mask, dim=1 if batch_first else 0)
def to_one_hot(y, n_dims=None):
""" Take integer y (tensor or variable) with n dims and convert it to 1-hot representation with n+1 dims. """
y_tensor = y.data
y_tensor = y_tensor.to(dtype=torch.long).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(
y_tensor.size()[0],
n_dims,
device=y.device,
).scatter_(1, y_tensor, 1)
y_one_hot = y_one_hot.view(*y.shape, -1)
return y_one_hot
| BasicTranslationModel |
python | numba__numba | numba/testing/main.py | {
"start": 22642,
"end": 22728
} | class ____(runner.TextTestRunner):
resultclass = RefleakTestResult
| RefleakTestRunner |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 175478,
"end": 181252
} | class ____(_CompressedMixin, sparse_test_class()):
@classmethod
def spcreator(cls, *args, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
return csc_array(*args, **kwargs)
math_dtypes = [np.bool_, np.int_, np.float64, np.complex128]
def test_constructor1(self):
b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd')
bsp = self.csc_container(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.nnz,4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.format,'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = self.csc_container(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = array([[1, 0], [0, 0], [0, 2]], 'd')
bsp = self.csc_container(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = self.csc_container((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4, 3), csc.toarray())
# with duplicates (should sum the duplicates)
csc = self.csc_container(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
assert csc.nnz == 2
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = self.csc_container((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_constructor6(self):
# infer dimensions and dtype from lists
indptr = [0, 1, 3, 3]
indices = [0, 5, 1, 2]
data = [1, 2, 3, 4]
csc = self.csc_container((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
assert_(np.issubdtype(csc.dtype, np.signedinteger))
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = self.csc_container((data, indices, indptr), shape=(10,2))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.toarray(), bsp.toarray())
def test_sort_indices(self):
data = arange(5)
row = array([7, 2, 1, 5, 4])
ptr = [0, 3, 5]
asp = self.csc_container((data, row, ptr), shape=(10,2))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.toarray(), bsp.toarray())
def test_ufuncs(self):
X = self.csc_container(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(self.datsp, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = self.csc_container((data, indices, indptr), shape=(10,2))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = self.csc_container((data, indices, indptr), shape=(10,2))
assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = self.asdense(np.random.rand(5, 7))
S = self.spcreator(D)
SIJ = S[I,J]
if issparse(SIJ):
SIJ = SIJ.toarray()
assert_equal(SIJ, D[I,J])
def test_scalar_idx_dtype(self):
# Check that index dtype takes into account all parameters
# passed to sparsetools, including the scalar ones
indptr = np.zeros(2, dtype=np.int32)
indices = np.zeros(0, dtype=np.int32)
vals = np.zeros(0)
a = self.csc_container((vals, indices, indptr), shape=(2**31-1, 1))
b = self.csc_container((vals, indices, indptr), shape=(2**31, 1))
ij = np.zeros((2, 0), dtype=np.int32)
c = self.csc_container((vals, ij), shape=(2**31-1, 1))
d = self.csc_container((vals, ij), shape=(2**31, 1))
e = self.csr_container((1, 2**31-1))
f = self.csr_container((1, 2**31))
assert_equal(a.indptr.dtype, np.int32)
assert_equal(b.indptr.dtype, np.int64)
assert_equal(c.indptr.dtype, np.int32)
assert_equal(d.indptr.dtype, np.int64)
assert_equal(e.indptr.dtype, np.int32)
assert_equal(f.indptr.dtype, np.int64)
# These shouldn't fail
for x in [a, b, c, d, e, f]:
x + x
TestCSC.init_class()
| TestCSC |
python | numba__numba | numba/tests/test_comprehension.py | {
"start": 7500,
"end": 17716
} | class ____(unittest.TestCase):
_numba_parallel_test_ = False
def check(self, pyfunc, *args, **kwargs):
"""A generic check function that run both pyfunc, and jitted pyfunc,
and compare results."""
run_parallel = kwargs.get('run_parallel', False)
assert_allocate_list = kwargs.get('assert_allocate_list', False)
assert_dtype = kwargs.get('assert_dtype', False)
cfunc = jit(nopython=True,parallel=run_parallel)(pyfunc)
pyres = pyfunc(*args)
cres = cfunc(*args)
np.testing.assert_array_equal(pyres, cres)
if assert_dtype:
self.assertEqual(cres[1].dtype, assert_dtype)
if assert_allocate_list:
self.assertIn('allocate list', cfunc.inspect_llvm(cfunc.signatures[0]))
else:
self.assertNotIn('allocate list', cfunc.inspect_llvm(cfunc.signatures[0]))
if run_parallel:
self.assertIn('@do_scheduling', cfunc.inspect_llvm(cfunc.signatures[0]))
def test_comp_with_array_1(self):
def comp_with_array_1(n):
m = n * 2
l = np.array([i + m for i in range(n)])
return l
self.check(comp_with_array_1, 5)
if PARALLEL_SUPPORTED:
self.check(comp_with_array_1, 5, run_parallel=True)
def test_comp_with_array_2(self):
def comp_with_array_2(n, threshold):
A = np.arange(-n, n)
return np.array([ x * x if x < threshold else x * 2 for x in A ])
self.check(comp_with_array_2, 5, 0)
def test_comp_with_array_noinline(self):
def comp_with_array_noinline(n):
m = n * 2
l = np.array([i + m for i in range(n)])
return l
import numba.core.inline_closurecall as ic
try:
ic.enable_inline_arraycall = False
self.check(comp_with_array_noinline, 5, assert_allocate_list=True)
finally:
ic.enable_inline_arraycall = True
def test_comp_with_array_noinline_issue_6053(self):
def comp_with_array_noinline(n):
lst = [0]
for i in range(n):
lst.append(i)
l = np.array(lst)
return l
self.check(comp_with_array_noinline, 5, assert_allocate_list=True)
def test_comp_nest_with_array(self):
def comp_nest_with_array(n):
l = np.array([[i * j for j in range(n)] for i in range(n)])
return l
self.check(comp_nest_with_array, 5)
if PARALLEL_SUPPORTED:
self.check(comp_nest_with_array, 5, run_parallel=True)
def test_comp_nest_with_array_3(self):
def comp_nest_with_array_3(n):
l = np.array([[[i * j * k for k in range(n)] for j in range(n)] for i in range(n)])
return l
self.check(comp_nest_with_array_3, 5)
if PARALLEL_SUPPORTED:
self.check(comp_nest_with_array_3, 5, run_parallel=True)
def test_comp_nest_with_array_noinline(self):
def comp_nest_with_array_noinline(n):
l = np.array([[i * j for j in range(n)] for i in range(n)])
return l
import numba.core.inline_closurecall as ic
try:
ic.enable_inline_arraycall = False
self.check(comp_nest_with_array_noinline, 5,
assert_allocate_list=True)
finally:
ic.enable_inline_arraycall = True
def test_comp_with_array_range(self):
def comp_with_array_range(m, n):
l = np.array([i for i in range(m, n)])
return l
self.check(comp_with_array_range, 5, 10)
def test_comp_with_array_range_and_step(self):
def comp_with_array_range_and_step(m, n):
l = np.array([i for i in range(m, n, 2)])
return l
self.check(comp_with_array_range_and_step, 5, 10)
def test_comp_with_array_conditional(self):
def comp_with_array_conditional(n):
l = np.array([i for i in range(n) if i % 2 == 1])
return l
# arraycall inline would not happen when conditional is present
self.check(comp_with_array_conditional, 10, assert_allocate_list=True)
def test_comp_nest_with_array_conditional(self):
def comp_nest_with_array_conditional(n):
l = np.array([[i * j for j in range(n)] for i in range(n) if i % 2 == 1])
return l
self.check(comp_nest_with_array_conditional, 5,
assert_allocate_list=True)
@unittest.skipUnless(numpy_version < (1, 24),
'Setting an array element with a sequence is removed '
'in NumPy 1.24')
def test_comp_nest_with_dependency(self):
def comp_nest_with_dependency(n):
l = np.array([[i * j for j in range(i+1)] for i in range(n)])
return l
# test is expected to fail
with self.assertRaises(TypingError) as raises:
self.check(comp_nest_with_dependency, 5)
self.assertIn(_header_lead, str(raises.exception))
self.assertIn('array(undefined,', str(raises.exception))
def test_comp_unsupported_iter(self):
def comp_unsupported_iter():
val = zip([1, 2, 3], [4, 5, 6])
return np.array([a for a, b in val])
with self.assertRaises(TypingError) as raises:
self.check(comp_unsupported_iter)
self.assertIn(_header_lead, str(raises.exception))
self.assertIn('Unsupported iterator found in array comprehension',
str(raises.exception))
def test_no_array_comp(self):
def no_array_comp1(n):
l = [1,2,3,4]
a = np.array(l)
return a
# const 1D array is actually inlined
self.check(no_array_comp1, 10, assert_allocate_list=False)
def no_array_comp2(n):
l = [1,2,3,4]
a = np.array(l)
l.append(5)
return a
self.check(no_array_comp2, 10, assert_allocate_list=True)
def test_nested_array(self):
def nested_array(n):
l = np.array([ np.array([x for x in range(n)]) for y in range(n)])
return l
self.check(nested_array, 10)
def test_nested_array_with_const(self):
def nested_array(n):
l = np.array([ np.array([x for x in range(3)]) for y in range(4)])
return l
self.check(nested_array, 0)
def test_array_comp_with_iter(self):
def array_comp(a):
l = np.array([ x * x for x in a ])
return l
# with list iterator
l = [1,2,3,4,5]
self.check(array_comp, l)
# with array iterator
self.check(array_comp, np.array(l))
# with tuple iterator (issue #7394)
self.check(array_comp, tuple(l))
# with typed.List iterator (issue #6550)
self.check(array_comp, typed.List(l))
def test_array_comp_with_dtype(self):
def array_comp(n):
l = np.array([i for i in range(n)], dtype=np.complex64)
return l
self.check(array_comp, 10, assert_dtype=np.complex64)
def test_array_comp_inferred_dtype(self):
def array_comp(n):
l = np.array([i * 1j for i in range(n)])
return l
self.check(array_comp, 10)
def test_array_comp_inferred_dtype_nested(self):
def array_comp(n):
l = np.array([[i * j for j in range(n)] for i in range(n)])
return l
self.check(array_comp, 10)
def test_array_comp_inferred_dtype_nested_sum(self):
def array_comp(n):
l = np.array([[i * j for j in range(n)] for i in range(n)])
# checks that operations on the inferred array
return l
self.check(array_comp, 10)
def test_array_comp_inferred_dtype_outside_setitem(self):
def array_comp(n, v):
arr = np.array([i for i in range(n)])
# the following should not change the dtype
arr[0] = v
return arr
# float to int cast is valid
v = 1.2
self.check(array_comp, 10, v, assert_dtype=np.intp)
# complex to int cast is invalid
with self.assertRaises(TypingError) as raises:
cfunc = jit(nopython=True)(array_comp)
cfunc(10, 2.3j)
self.assertIn(
_header_lead + " Function({})".format(operator.setitem),
str(raises.exception),
)
self.assertIn(
"(array({}, 1d, C), Literal[int](0), complex128)".format(types.intp),
str(raises.exception),
)
def test_array_comp_shuffle_sideeffect(self):
nelem = 100
@jit(nopython=True)
def foo():
numbers = np.array([i for i in range(nelem)])
np.random.shuffle(numbers)
print(numbers)
with captured_stdout() as gotbuf:
foo()
got = gotbuf.getvalue().strip()
with captured_stdout() as expectbuf:
print(np.array([i for i in range(nelem)]))
expect = expectbuf.getvalue().strip()
# For a large enough array, the chances of shuffle to not move any
# element is tiny enough.
self.assertNotEqual(got, expect)
self.assertRegex(got, r'\[(\s*\d+)+\]')
def test_empty_list_not_removed(self):
# see issue #3724
def f(x):
t = []
myList = np.array([1])
a = np.random.choice(myList, 1)
t.append(x + a)
return a
self.check(f, 5, assert_allocate_list=True)
def test_reuse_of_array_var(self):
""" Test issue 3742 """
# redefinition of z breaks array comp as there's multiple defn
def foo(n):
# doesn't matter where this is in the code, it's just to ensure a
# `make_function` opcode exists
[i for i in range(1)]
z = np.empty(n)
for i in range(n):
z = np.zeros(n)
z[i] = i # write is required to trip the bug
return z
self.check(foo, 10, assert_allocate_list=True)
if __name__ == '__main__':
unittest.main()
| TestArrayComprehension |
python | huggingface__transformers | src/transformers/models/dab_detr/configuration_dab_detr.py | {
"start": 900,
"end": 13685
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DabDetrModel`]. It is used to instantiate
a DAB-DETR model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the DAB-DETR
[IDEA-Research/dab_detr-base](https://huggingface.co/IDEA-Research/dab_detr-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PreTrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
backbone (`str`, *optional*, defaults to `"resnet50"`):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
num_queries (`int`, *optional*, defaults to 300):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`DabDetrModel`] can detect in a single image. For COCO, we recommend 100 queries.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Indicates whether the transformer model architecture is an encoder-decoder or not.
activation_function (`str` or `function`, *optional*, defaults to `"prelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_size (`int`, *optional*, defaults to 256):
This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1.0):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 2):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
cls_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the classification loss in the object detection loss function.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
focal_alpha (`float`, *optional*, defaults to 0.25):
Alpha parameter in the focal loss.
temperature_height (`int`, *optional*, defaults to 20):
Temperature parameter to tune the flatness of positional attention (HEIGHT)
temperature_width (`int`, *optional*, defaults to 20):
Temperature parameter to tune the flatness of positional attention (WIDTH)
query_dim (`int`, *optional*, defaults to 4):
Query dimension parameter represents the size of the output vector.
random_refpoints_xy (`bool`, *optional*, defaults to `False`):
Whether to fix the x and y coordinates of the anchor boxes with random initialization.
keep_query_pos (`bool`, *optional*, defaults to `False`):
Whether to concatenate the projected positional embedding from the object query into the original query (key) in every decoder layer.
num_patterns (`int`, *optional*, defaults to 0):
Number of pattern embeddings.
normalize_before (`bool`, *optional*, defaults to `False`):
Whether we use a normalization layer in the Encoder or not.
sine_position_embedding_scale (`float`, *optional*, defaults to 'None'):
Scaling factor applied to the normalized positional encodings.
initializer_bias_prior_prob (`float`, *optional*):
The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
Examples:
```python
>>> from transformers import DabDetrConfig, DabDetrModel
>>> # Initializing a DAB-DETR IDEA-Research/dab_detr-base style configuration
>>> configuration = DabDetrConfig()
>>> # Initializing a model (with random weights) from the IDEA-Research/dab_detr-base style configuration
>>> model = DabDetrModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dab-detr"
sub_configs = {"backbone_config": AutoConfig}
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
use_timm_backbone=True,
backbone_config=None,
backbone="resnet50",
use_pretrained_backbone=True,
backbone_kwargs=None,
num_queries=300,
encoder_layers=6,
encoder_ffn_dim=2048,
encoder_attention_heads=8,
decoder_layers=6,
decoder_ffn_dim=2048,
decoder_attention_heads=8,
is_encoder_decoder=True,
activation_function="prelu",
hidden_size=256,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
init_xavier_std=1.0,
auxiliary_loss=False,
dilation=False,
class_cost=2,
bbox_cost=5,
giou_cost=2,
cls_loss_coefficient=2,
bbox_loss_coefficient=5,
giou_loss_coefficient=2,
focal_alpha=0.25,
temperature_height=20,
temperature_width=20,
query_dim=4,
random_refpoints_xy=False,
keep_query_pos=False,
num_patterns=0,
normalize_before=False,
sine_position_embedding_scale=None,
initializer_bias_prior_prob=None,
**kwargs,
):
if query_dim != 4:
raise ValueError("The query dimensions has to be 4.")
# We default to values which were previously hard-coded in the model. This enables configurability of the config
# while keeping the default behavior the same.
if use_timm_backbone and backbone_kwargs is None:
backbone_kwargs = {}
if dilation:
backbone_kwargs["output_stride"] = 16
backbone_kwargs["out_indices"] = [1, 2, 3, 4]
backbone_kwargs["in_chans"] = 3 # num_channels
# Backwards compatibility
elif not use_timm_backbone and backbone in (None, "resnet50"):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
backbone = None
# set timm attributes to None
dilation = None
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_queries = num_queries
self.hidden_size = hidden_size
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.backbone_kwargs = backbone_kwargs
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.cls_loss_coefficient = cls_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.focal_alpha = focal_alpha
self.query_dim = query_dim
self.random_refpoints_xy = random_refpoints_xy
self.keep_query_pos = keep_query_pos
self.num_patterns = num_patterns
self.normalize_before = normalize_before
self.temperature_width = temperature_width
self.temperature_height = temperature_height
self.sine_position_embedding_scale = sine_position_embedding_scale
self.initializer_bias_prior_prob = initializer_bias_prior_prob
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
self.tie_encoder_decoder = True # weights have to be tied for this model
__all__ = ["DabDetrConfig"]
| DabDetrConfig |
python | automl__auto-sklearn | test/test_pipeline/components/regression/test_random_forests.py | {
"start": 161,
"end": 990
} | class ____(BaseRegressionComponentTest):
__test__ = True
res = dict()
res["default_boston"] = 0.8410063895401654
res["boston_n_calls"] = 9
res["default_boston_iterative"] = res["default_boston"]
res["default_boston_sparse"] = 0.4194462097407078
res["default_boston_iterative_sparse"] = res["default_boston_sparse"]
res["default_diabetes"] = 0.3496051170409269
res["diabetes_n_calls"] = 9
res["default_diabetes_iterative"] = res["default_diabetes"]
res["default_diabetes_sparse"] = 0.2383300978781976
res["default_diabetes_iterative_sparse"] = res["default_diabetes_sparse"]
sk_mod = sklearn.ensemble.RandomForestRegressor
module = RandomForest
step_hyperparameter = {
"name": "n_estimators",
"value": module.get_max_iter(),
}
| RandomForestComponentTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_col_breaks.py | {
"start": 301,
"end": 1333
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_col_breaks() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_col_breaks_1(self):
"""Test the _write_col_breaks() method"""
self.worksheet.vbreaks = [1]
self.worksheet._write_col_breaks()
exp = """<colBreaks count="1" manualBreakCount="1"><brk id="1" max="1048575" man="1"/></colBreaks>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_col_breaks_2(self):
"""Test the _write_col_breaks() method"""
self.worksheet.vbreaks = [8, 3, 1, 0]
self.worksheet._write_col_breaks()
exp = """<colBreaks count="3" manualBreakCount="3"><brk id="1" max="1048575" man="1"/><brk id="3" max="1048575" man="1"/><brk id="8" max="1048575" man="1"/></colBreaks>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteColBreaks |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/fifo_queue_test.py | {
"start": 15055,
"end": 16396
} | class ____(test.TestCase):
def testEnqueueWithShape(self):
with test_util.use_gpu():
q = data_flow_ops.GPUCompatibleFIFOQueue(
10, dtypes_lib.float32, shapes=(3, 2))
self.evaluate(q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],)))
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, self.evaluate(q.size()))
def testEnqueueDequeue(self):
with test_util.use_gpu():
q = data_flow_ops.GPUCompatibleFIFOQueue(10, dtypes_lib.float32)
elems_numpy = [10.0, 20.0, 30.0]
# The identity ensures constants are copied to the GPU immediately
elems = [array_ops.identity(constant_op.constant(x))
for x in elems_numpy]
for x in elems:
self.evaluate(q.enqueue((x,)))
for i in range(len(elems)):
dequeued_tensor = q.dequeue()
self.assertEqual(elems[0].device, dequeued_tensor.device)
vals = self.evaluate(dequeued_tensor)
self.assertEqual([elems_numpy[i]], vals)
@test_util.run_v1_only(
"These tests can likely run in 2.x with some fixes, but have not been "
"converted yet. Currently they hold on to operations and rely on "
"re-running them; for eager compatibility we need to 're-create' the op "
"each time.")
| GPUCompatibleFIFOQueueTests |
python | getsentry__sentry | tests/snuba/api/endpoints/test_discover_saved_queries.py | {
"start": 493,
"end": 1396
} | class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.projects = [
self.create_project(organization=self.org),
self.create_project(organization=self.org),
]
self.project_ids = [project.id for project in self.projects]
self.project_ids_without_access = [self.create_project().id]
query = {"fields": ["test"], "conditions": [], "limit": 10}
model = DiscoverSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Test query",
query=query,
version=1,
)
model.set_projects(self.project_ids)
@thread_leak_allowlist(reason="sentry sdk background worker", issue=97042)
| DiscoverSavedQueryBase |
python | tensorflow__tensorflow | tensorflow/python/eager/backprop_test.py | {
"start": 66718,
"end": 68619
} | class ____(test_util.TensorFlowTestCase):
def _assert_indexed_slices_equal(self, left, right):
self.assertAllEqual(
self.evaluate(ops.convert_to_tensor(left)),
self.evaluate(ops.convert_to_tensor(right)))
def testNoGradients(self):
self.assertIsNone(backprop_util.AggregateIndexedSlicesGradients([]))
def testOneGradient(self):
t = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
result = backprop_util.AggregateIndexedSlicesGradients([t])
self._assert_indexed_slices_equal(t, result)
def testMultipleGradients(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop_util.AggregateIndexedSlicesGradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
def testMultipleGradientsWithNones(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = math_ops._as_indexed_slices(
constant_op.constant([[0., 0.], [5, 6], [7., 8.]]))
t3 = None
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop_util.AggregateIndexedSlicesGradients([t0, t1, t3])
self._assert_indexed_slices_equal(total, result)
def testMixedTensorAndIndexedSlices(self):
t0 = math_ops._as_indexed_slices(
constant_op.constant([[1., 2.], [0, 0], [3., 4.]]))
t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]])
total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]])
result = backprop_util.AggregateIndexedSlicesGradients([t0, t1])
self._assert_indexed_slices_equal(total, result)
if __name__ == '__main__':
test.main()
| AggregateIndexedSlicesGradientsTest |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 3773,
"end": 4023
} | class ____(TypeChecker):
def __init__(self, default_value, *acceptable_types):
TypeChecker.__init__(self, *acceptable_types)
self._default_value = default_value
def DefaultValue(self):
return self._default_value
| TypeCheckerWithDefault |
python | qdrant__qdrant-client | qdrant_client/uploader/grpc_uploader.py | {
"start": 2708,
"end": 4936
} | class ____(BaseUploader):
def __init__(
self,
host: str,
port: int,
collection_name: str,
max_retries: int,
wait: bool = False,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
):
self.collection_name = collection_name
self._host = host
self._port = port
self.max_retries = max_retries
self._kwargs = kwargs
self._wait = wait
self._shard_key_selector = (
RestToGrpc.convert_shard_key_selector(shard_key_selector)
if shard_key_selector is not None
else None
)
self._timeout = kwargs.pop("timeout", None)
self._update_filter = (
RestToGrpc.convert_filter(update_filter)
if isinstance(update_filter, rest.Filter) # type: ignore[attr-defined]
else update_filter
)
@classmethod
def start(
cls,
collection_name: Optional[str] = None,
host: str = "localhost",
port: int = 6334,
max_retries: int = 3,
**kwargs: Any,
) -> "GrpcBatchUploader":
if not collection_name:
raise RuntimeError("Collection name could not be empty")
return cls(
host=host,
port=port,
collection_name=collection_name,
max_retries=max_retries,
**kwargs,
)
def process_upload(self, items: Iterable[Any]) -> Generator[bool, None, None]:
channel = get_channel(host=self._host, port=self._port, **self._kwargs)
points_client = grpc.PointsStub(channel)
for batch in items:
yield upload_batch_grpc(
points_client,
self.collection_name,
batch,
shard_key_selector=self._shard_key_selector,
update_filter=self._update_filter,
max_retries=self.max_retries,
wait=self._wait,
timeout=self._timeout,
)
def process(self, items: Iterable[Any]) -> Iterable[bool]:
yield from self.process_upload(items)
| GrpcBatchUploader |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 456174,
"end": 460262
} | class ____(Request):
"""
Mark a task status as published.
For Annotation tasks - if any changes were committed by this task,
a new version in the dataset together with an output view are created.
For Training tasks - if a model was created, it should be set to ready.
:param force: If not true, call fails if the task status is not 'stopped'
:type force: bool
:param publish_model: Indicates that the task output model (if exists) should
be published. Optional, the default value is True.
:type publish_model: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "publish"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'stopped'",
"type": ["boolean", "null"],
},
"publish_model": {
"description": (
"Indicates that the task output model (if exists) should be published. Optional, the default value "
"is True."
),
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task,
force=False,
publish_model=None,
status_reason=None,
status_message=None,
**kwargs
):
super(PublishRequest, self).__init__(**kwargs)
self.force = force
self.publish_model = publish_model
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("publish_model")
def publish_model(self):
return self._property_publish_model
@publish_model.setter
def publish_model(self, value):
if value is None:
self._property_publish_model = None
return
self.assert_isinstance(value, "publish_model", (bool,))
self._property_publish_model = value
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| PublishRequest |
python | pytorch__pytorch | test/nn/test_parametrization.py | {
"start": 854,
"end": 82206
} | class ____(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
# torch/nn/utils/parametrize
@skipIfNoLapack
@swap([True, False])
def test_register_and_remove_parametrization(self):
r"""Test that it is possible to add a few parametrizations
on a parameter or a buffer and that removing them restores the initial state
It also tests that backpropagating through them works as expected
"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
# Cayley map
# If X is skew-symmetric it returns an orthogonal matrix
Id = torch.eye(X.size(0), device=X.device)
# We call contiguous because solve returns a tensor with strides that are Fortran-contiguous
# and autograd raises a performance warning.
# This happens when we remove the parametrization with leave_parametrized=True,
# which does a set_ with a non-contiguous tensor while the gradient is contiguous
return torch.linalg.solve(Id + X, Id - X).contiguous()
class Resize(nn.Module):
def forward(self, X):
return X[[0]]
class NoResize(nn.Module):
def forward(self, X):
return X
# Define a couple vector parametrizations
class FirstZero(nn.Module):
def forward(self, x):
return torch.cat([x.new_zeros(1), x[1:]])
class LastZero(nn.Module):
def forward(self, x):
return torch.cat([x[:-1], x.new_zeros(1)])
model = nn.Linear(8, 8)
initial_weight_id = id(model.weight)
initial_bias_id = id(model.bias)
initial_model = deepcopy(model)
# Test unsafe flag
with self.assertRaisesRegex(
ValueError,
"Registering a parametrization may not change the shape of the tensor",
):
parametrize.register_parametrization(
model, "weight", Resize()
) # default unsafe = False
model(torch.ones(8, 8))
# One parametrization with unsafe=True
parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
self.assertTrue(model.weight.shape[0] == 1)
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Two parametrizations with unsafe=True
parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
self.assertTrue(model.weight.shape[0] == 1)
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test unsafe flag doesn't change expected behavior
parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be skew-symmetric
A = model.weight
self.assertEqual(A, -A.T)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del A
# Remove and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test one parametrization
parametrize.register_parametrization(model, "weight", Skew())
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be skew-symmetric
A = model.weight
self.assertEqual(A, -A.T)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del A
# Remove and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(model.__class__, nn.Linear)
# Test two parametrizations at the same time and removing them
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Result should be orthogonal
X = model.weight
Id = torch.eye(X.size(0), device=X.device)
self.assertEqual(X.T @ X, Id)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del X
# Structure tests
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertIn("weight", model.parametrizations)
self.assertNotIn("weight", model._parameters)
# Remove
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertEqual(model.weight, initial_model.weight)
self.assertEqual(id(model.weight), initial_weight_id)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
# Add everything
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
parametrize.register_parametrization(model, "bias", FirstZero())
parametrize.register_parametrization(model, "bias", LastZero())
# Basic tests
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertTrue(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.0)
self.assertEqual(model.bias[-1].item(), 0.0)
self.assertEqual(len(list(model.parameters())), 2) # Nothing weird has happened
# Should not throw
sgd = torch.optim.SGD(model.parameters(), lr=0.01)
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
# Remove first parametrization.
# Check that the model is still parametrized and so is the second parameter
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertTrue(parametrize.is_parametrized(model)) # Still parametrized
self.assertFalse(
parametrize.is_parametrized(model, "weight")
) # Parametrization removed
self.assertTrue(
parametrize.is_parametrized(model, "bias")
) # Still parametrized
self.assertEqual(model.bias[0].item(), 0.0) # Still parametrized
self.assertEqual(model.bias[-1].item(), 0.0) # Still parametrized
self.assertNotEqual(model.weight, initial_model.weight) # Has been updated
self.assertEqual(id(model.weight), initial_weight_id) # Keeps the same id
self.assertEqual(len(list(model.parameters())), 2) # Nothing weird has happened
# Should not throw
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
# Remove the second parametrization.
# Check that the module is not parametrized
parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
self.assertFalse(parametrize.is_parametrized(model)) # Not parametrized
self.assertNotEqual(model.bias, initial_model.bias) # Has been updated
self.assertNotEqual(model.bias[0].item(), 0.0) # Not parametrized
self.assertNotEqual(model.bias[-1].item(), 0.0) # Not parametrized
self.assertEqual(id(model.bias), initial_bias_id) # Keeps the same id
self.assertFalse(
hasattr(model, "parametrizations")
) # Not parametrized the module
self.assertEqual(model.__class__, nn.Linear) # Resores the previous class
self.assertEqual(len(list(model.parameters())), 2) # Nothing weird has happeed
# Should not throw things are updated
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del weight_copy, bias_copy
# Test leave_parametrized=True
for _ in range(2):
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
parametrize.remove_parametrizations(
model, "weight", leave_parametrized=True
)
# We didn't change the dtype nor had multiple inputs, so the id should be the same
self.assertEqual(id(model.weight), initial_weight_id)
self.assertEqual(id(model.bias), initial_bias_id)
# Should not throw. Things are updated
weight_copy = model.weight.clone()
bias_copy = model.bias.clone()
sgd.zero_grad()
(model.weight.T @ model.bias).sum().backward()
sgd.step()
self.assertNotEqual(model.weight, weight_copy)
self.assertNotEqual(model.bias, bias_copy)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del weight_copy, bias_copy
@swap([True, False])
def test_register_and_remove_nested_parametrization(self):
r"""Test that it is possible to nest the parametrizations
meaning that the original param is parametrized again
"""
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
model = nn.Linear(8, 8)
# Add top level parametrization
parametrize.register_parametrization(model, "weight", Skew())
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertNotIn("weight", model._parameters)
# Result should be skew-symmetric
A = model.weight
self.assertEqual(A, -A.T)
if get_swap_module_params_on_conversion():
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
del A
# Add nested parametrization
param_mod = model.parametrizations.weight
self.assertFalse(hasattr(param_mod, "parametrizations"))
self.assertFalse(parametrize.is_parametrized(param_mod))
self.assertFalse(parametrize.is_parametrized(param_mod, "original"))
parametrize.register_parametrization(param_mod, "original", Skew())
self.assertTrue(hasattr(param_mod, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(param_mod))
self.assertTrue(parametrize.is_parametrized(param_mod, "original"))
self.assertNotIn("original", param_mod._parameters)
# Result should be skew-symmetric
A = param_mod.original
self.assertEqual(A, -A.T)
# Remove nested param and check consistency
parametrize.remove_parametrizations(
param_mod, "original", leave_parametrized=False
)
self.assertFalse(hasattr(param_mod, "parametrizations"))
self.assertEqual(param_mod.__class__, parametrize.ParametrizationList)
# Remove top level and check consistency
parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
@swap([True, False])
def test_register_and_remove_buffer_parametrization(self):
r"""Test that it is possible to add and remove parametrizations on buffers"""
# Define a couple vector parametrizations
class FirstZero(nn.Module):
def forward(self, x):
return torch.cat([x.new_zeros(1), x[1:]])
class LastZero(nn.Module):
def forward(self, x):
return torch.cat([x[:-1], x.new_zeros(1)])
model = nn.Linear(8, 8)
# Instantiate parametrizations on buffers. It should work as expected
delattr(model, "bias")
model.bias = Buffer(torch.ones(8))
parametrize.register_parametrization(model, "bias", FirstZero())
parametrize.register_parametrization(model, "bias", LastZero())
self.assertTrue(parametrize.is_parametrized(model))
self.assertTrue(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.0)
self.assertEqual(model.bias[-1].item(), 0.0)
self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
self.assertEqual(len(list(model.parameters())), 1)
# Remove parametrizations on buffers. It should work as expected
parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
self.assertFalse(parametrize.is_parametrized(model))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
self.assertEqual(model.bias[0].item(), 0.0)
self.assertEqual(model.bias[-1].item(), 0.0)
self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
self.assertEqual(len(list(model.parameters())), 1)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
@skipIfTorchDynamo(
"Not applicable; see https://github.com/pytorch/pytorch/issues/127738"
)
@swap([True, False])
def test_serialization_parametrization(self):
r"""Test that it is possible to serialize a parametrized model via state_dict"""
# A stateful parametrization
class Orthogonal(nn.Module):
def __init__(self, n):
super().__init__()
self.id = Buffer(torch.eye(n))
self.B = Buffer(torch.empty(n, n))
init.orthogonal_(self.B)
def forward(self, X):
A = X.triu(1)
A = A - A.T
return self.B @ torch.linalg.solve(self.id + A, self.id - A)
def get_model():
model = torch.nn.Sequential(
torch.nn.Linear(5, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 1),
)
parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
return model
model = get_model()
prev_weight = model[0].weight
prev_B = model[0].parametrizations.weight[0].B
new_model = get_model()
with TemporaryFileName() as fname:
torch.save(model.state_dict(), fname)
new_model.load_state_dict(torch.load(fname))
# Integrity tests
self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
self.assertEqual(prev_weight, new_model[0].weight)
self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)
# Trying to save the whole parametrized model raises
with self.assertRaisesRegex(RuntimeError, "state_dict"):
with TemporaryFileName() as fname:
torch.save(model, fname)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
@swap([True, False])
def test_initialization_parametrization(self):
r"""Test that it is possible to initialize a parametrization when it
implements a `right_inverse` method
"""
class Skew(nn.Module):
def forward(self, X):
A = X.triu(1)
return A - A.T
def is_skew(self, A):
return torch.allclose(A, -A.T, atol=1e-6)
def right_inverse(self, X):
if not self.is_skew(X):
raise ValueError("The matrix is not skew-symmetric.")
return X.triu(1)
# Implements a Cayley map where right_inverse is not quite the inverse of forward
class Orthogonal(nn.Module):
def __init__(self, n):
super().__init__()
self.B = Buffer(torch.eye(n))
def forward(self, X):
Id = torch.eye(X.size(0))
return self.B @ torch.linalg.solve(Id + X, Id - X)
def is_orthogonal(self, X):
Id = torch.eye(X.size(0))
return torch.allclose(X.T @ X, Id, atol=1e-4)
def right_inverse(self, X):
if not self.is_orthogonal(X):
raise ValueError("The input is not orthogonal.")
# cayley(0) == Id, so B @ cayley(0) == B
self.B = X
return torch.zeros_like(X)
N = 5
model = nn.Linear(N, N)
# Register the skew-symmetric constraint. The result is now skew-symmetric
skew = Skew()
# Make the weight skew-symmetric before registering the parametrization
with torch.no_grad():
model.weight.set_(skew(model.weight))
parametrize.register_parametrization(model, "weight", skew)
X = torch.rand(N, N)
# X is not skew-symmetric, so it throws an error
with self.assertRaises(ValueError):
model.weight = X
# Make X skew-symmetric
X = X - X.T
model.weight = X
self.assertEqual(model.parametrizations.weight.original, X.triu(1))
self.assertEqual(model.weight, X)
# Having several parametrizations registered should work in the same way
parametrize.register_parametrization(model, "weight", Orthogonal(N))
# Register now the Cayley map. The result is now orthogonal
X = torch.rand(N, N)
# X is not orthogonal, so it throws an error
with self.assertRaises(ValueError):
model.weight = X
init.orthogonal_(X)
model.weight = X
self.assertEqual(model.weight, X)
self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
@swap([True, False])
def test_errors_unparametrized_tensor_parametrization(self):
# Test errors when registering a parametrization on an unparametrized tensor
module = nn.Linear(3, 4)
weight_init = module.weight.clone()
class Identity(nn.Module):
def forward(self, x):
return x
# Register a parametrization on a non-existing parameter throws
with self.assertRaisesRegex(ValueError, "does not have a parameter"):
parametrize.register_parametrization(module, "foo", Identity())
self.assertFalse(parametrize.is_parametrized(module))
# Removing parametrizations from an unparametrized tensor throws
with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
parametrize.remove_parametrizations(module, "bias")
self.assertFalse(parametrize.is_parametrized(module))
# A correct parametrization with several outputs
class Sum(nn.Module):
def forward(self, x, y):
return x + y
def right_inverse(self, z):
return z, torch.zeros_like(z)
parametrize.register_parametrization(module, "weight", Sum())
# Cannot remove a parametrization with several outputs with `leave_parametrized=False`
with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
parametrize.remove_parametrizations(
module, "weight", leave_parametrized=False
)
parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
# A parametrization with an incorrect number of outputs
class WrongNumberParams(nn.Module):
def forward(self, x, y, z):
return x + y + z
def right_inverse(self, w):
return w, torch.zeros_like(w)
# Makes param(*param.right_inverse(X)) fail
with self.assertRaisesRegex(TypeError, "positional argument"):
parametrize.register_parametrization(module, "weight", WrongNumberParams())
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization with a right_inverse that does not return a Tensor or Sequence[Tensor]
class WrongRightInverse(Identity):
def right_inverse(self, z):
return None
# right_inverse should return a Tensor or a Sequence[Tensor]
with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
parametrize.register_parametrization(module, "weight", WrongRightInverse())
self.assertFalse(parametrize.is_parametrized(module))
# If it's a sequence, it must to be a sequence of tensors
class WrongRightInverseSequence(nn.Module):
def forward(self, x, y):
return x
def right_inverse(self, z):
return None, z
with self.assertRaisesRegex(ValueError, "of the sequence with type"):
parametrize.register_parametrization(
module, "weight", WrongRightInverseSequence()
)
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization from one tensor to one tensor that changes the dtype
class ChangeDtypeInverse(nn.Module):
def forward(self, x):
return x.float()
def right_inverse(self, w):
return w.bool()
# For parametrizations that return one tensor, right_inverse may not change the dtype
with self.assertRaisesRegex(
ValueError, "outputs one tensor, it may not change the dtype"
):
parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
self.assertFalse(parametrize.is_parametrized(module))
class ChangeDeviceInverse(nn.Module):
def forward(self, x):
return x.float()
def right_inverse(self, w):
return w.to(torch.device("meta"))
# For parametrizations that return one tensor, right_inverse may not change the device
with self.assertRaisesRegex(
ValueError, "outputs one tensor, it may not change the device"
):
parametrize.register_parametrization(
module, "weight", ChangeDeviceInverse()
)
self.assertFalse(parametrize.is_parametrized(module))
# Doesn't return a tensor
class NotTensor(nn.Module):
def forward(self, x):
return 2
# Forward must return a tensor
with self.assertRaisesRegex(ValueError, "must return a tensor"):
parametrize.register_parametrization(module, "weight", NotTensor())
self.assertFalse(parametrize.is_parametrized(module))
# A parametrization from one tensor to one tensor that changes the dtype
class ChangeDtype(nn.Module):
def forward(self, x):
return x.bool()
# forward should not change the initial dtype
with self.assertRaisesRegex(ValueError, "may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtype())
self.assertFalse(parametrize.is_parametrized(module))
# Change shape
class ChangeShape(nn.Module):
def forward(self, x):
return x[:-1]
# forward should not change the original shape
with self.assertRaisesRegex(ValueError, "may not change the shape"):
parametrize.register_parametrization(module, "weight", ChangeShape())
self.assertFalse(parametrize.is_parametrized(module))
# Many to one that changes dtype
class ChangeDtypeMulti(nn.Module):
def forward(self, x, y):
return (x + y).bool()
def right_inverse(self, w):
return w, w + 1
# forward should not change the original shape even for parametrizations with many inputs
with self.assertRaisesRegex(ValueError, "may not change the dtype"):
parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
self.assertFalse(parametrize.is_parametrized(module))
# Returning a sequence of size one, although weird, it's correct
class SequenceLen1(nn.Module):
def forward(self, x):
return x
def right_inverse(self, w):
return (w,)
parametrize.register_parametrization(module, "weight", SequenceLen1())
self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
_ = module.weight # Does not throw
self.assertTrue(parametrize.is_parametrized(module))
parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)
# None of the operations above should have altered the weight
self.assertFalse(parametrize.is_parametrized(module))
self.assertEqual(module.weight, weight_init)
    @swap([True, False])
    def test_errors_parametrized_tensor_parametrization(self):
        """Registering an invalid parametrization on an already-parametrized
        tensor must raise ValueError and leave the existing (valid)
        parametrization chain untouched.
        """

        # Valid baseline parametrization the invalid candidates are stacked onto.
        class Identity(nn.Module):
            def forward(self, x):
                return x

        module = nn.Linear(3, 4)
        parametrize.register_parametrization(module, "weight", Identity())

        # forward() has to return a single tensor.
        class WrongReturn(nn.Module):
            def forward(self, x):
                return x, x

        with self.assertRaisesRegex(ValueError, "must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturn())
        # The failed registration must not have disturbed the Identity chain.
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # forward() cannot change the dtype of the parametrized tensor.
        class ChangeDtype(nn.Module):
            def forward(self, x):
                return x.bool()

        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtype())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # forward() cannot change the shape of the parametrized tensor.
        class ChangeShape(nn.Module):
            def forward(self, x):
                return x[:-1]

        with self.assertRaisesRegex(ValueError, "may not change the shape"):
            parametrize.register_parametrization(module, "weight", ChangeShape())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # The following checks are mostly due to bugs in the code of the parametrization
        # right_inverse has to return a tensor
        class WrongReturnInverse(Identity):
            def right_inverse(self, x):
                return x, x

        with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturnInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # right_inverse cannot change the dtype.
        class ChangeDtypeInverse(Identity):
            def right_inverse(self, x):
                return x.bool()

        with self.assertRaisesRegex(ValueError, "must have the same dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # right_inverse cannot change the shape.
        class ChangeShapeInverse(Identity):
            def right_inverse(self, x):
                return x[:-1]

        with self.assertRaisesRegex(ValueError, "must have the same shape"):
            parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    @swap([True, False])
    def test_multiple_inputs_parametrization(self):
        """Parametrizations whose ``right_inverse`` decomposes a tensor into
        several tensors: registration exposes ``original0``/``original1``,
        removal requires ``leave_parametrized=True``, stacking and training
        all work.
        """

        # A parametrization with several outputs
        class RankOne(nn.Module):
            def forward(self, x, y):
                # Form a rank-1 matrix from a pair of vectors
                return x.unsqueeze(-1) @ y.unsqueeze(-2)

            def right_inverse(self, Y):
                # We project the given matrix onto the rank 1 matrices
                U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
                # S is ordered in a decreasing way.
                s0_sqrt = S[0].sqrt().unsqueeze(-1)
                return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt

        # Simple parametrisation
        class Double(nn.Module):
            def forward(self, x):
                return 2.0 * x

            def right_inverse(self, w):
                return 0.5 * w

        model = nn.Linear(3, 3)
        # Test one parametrization
        parametrize.register_parametrization(model, "weight", RankOne())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        # Two decomposed tensors => two stored originals.
        self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
        self.assertIn("original0", model.parametrizations.weight._parameters)
        self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
        self.assertIn("original1", model.parametrizations.weight._parameters)
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be rank 1
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(
                model, "weight", leave_parametrized=False
            )
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)

        # Registering parametrizations with one input on top of one with multiple inputs should work
        init_weight = model.weight.clone()
        parametrize.register_parametrization(model, "weight", RankOne())
        # Projecting a rank 1 matrix onto the matrices of rank one does not change the matrix
        self.assertEqual(init_weight, model.weight)
        parametrize.register_parametrization(model, "weight", Double())
        # The matrix now is twice the initial matrix
        self.assertEqual(2.0 * init_weight, model.weight)
        # Multiplying by a scalar does not change the rank
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

        # The model has now three parameters (original0, original1, bias).
        self.assertEqual(len(list(model.parameters())), 3)

        sgd = torch.optim.SGD(model.parameters(), lr=0.1)

        # Test backward. Should not throw
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()

        # Same drill as before, removing should work as expected
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(
                model, "weight", leave_parametrized=False
            )
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)

        # The model has now two parameters (weight, bias).
        self.assertEqual(len(list(model.parameters())), 2)

        # Test backward. Should not throw
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
@swap([True, False])
def test_caching_parametrization(self):
r"""Test the caching system of a parametrization"""
# Define a couple matrix parametrizations
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
# Test that the caching system works
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
@skipIfNoLapack
@swap([True, False])
def test_caching_parametrization_with_transfer_parametrizations_and_params(self):
r"""Test that transferring parametrizations doesn't cause issues with caching"""
class Skew(nn.Module):
def forward(self, X):
X = X.tril(-1)
return X - X.T
class Orthogonal(nn.Module):
def forward(self, X):
Id = torch.eye(X.size(0), device=X.device)
return torch.linalg.solve(Id + X, Id - X)
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Skew())
parametrize.register_parametrization(model, "weight", Orthogonal())
to_model = nn.Linear(5, 5)
parametrize.transfer_parametrizations_and_params(model, to_model)
with parametrize.cached():
X = model.weight
Y = model.weight
self.assertEqual(id(X), id(Y))
A = to_model.weight
B = to_model.weight
self.assertEqual(id(A), id(B))
# test that the results are distinct objects for each module
self.assertNotEqual(id(A), id(X))
@swap([True, False])
def test_parametrization_same_training_mode(self):
r"""Test training mode updated on parametrization registration"""
class Identity(nn.Module):
def forward(self, X):
return X
module = nn.Linear(4, 4)
module.eval()
parametrize.register_parametrization(module, "weight", Identity())
self.assertFalse(module.parametrizations.weight[0].training)
module.train()
parametrize.register_parametrization(module, "weight", Identity().eval())
self.assertTrue(module.parametrizations.weight[0].training)
self.assertTrue(module.parametrizations.weight[1].training)
@swap([True, False])
def test_type_before_parametrizations(self):
r"""Test that type_before_parametrizations always retrieves original type"""
class Identity(nn.Module):
def forward(self, X):
return X
model = nn.Linear(5, 5)
original_type = type(model)
self.assertTrue(
parametrize.type_before_parametrizations(model) == original_type
)
parametrize.register_parametrization(model, "weight", Identity())
self.assertTrue(
parametrize.type_before_parametrizations(model) == original_type
)
    @skipIfTorchDynamo(
        "Not applicable; see https://github.com/pytorch/pytorch/issues/127738"
    )
    @swap([True, False])
    def test_deepcopy_after_parametrization(self):
        r"""Test that we are able to create a deepcopy of the module when it's parametrized.

        Covers both a plain module and one with a custom ``__deepcopy__``,
        with one or two parametrized tensors and stacked parametrizations.
        """

        class AddOne(nn.Module):
            def forward(self, x):
                return x + 1.0

        class ModelWithoutDeepcopy(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.weight = nn.Parameter(
                    torch.tensor([1.0, 1.0, 1.0, 1.0]), requires_grad=True
                )
                self.bias = nn.Parameter(
                    torch.tensor([0.0, 0.0, 0.0, 0.0]), requires_grad=True
                )
                self.attr = [1.0, 2.0, 3.0, 4.0]

        class ActualModel(ModelWithoutDeepcopy):
            # Emulate custom implementation of the deepcopying.
            def __deepcopy__(self, memo):
                result = self.__new__(self.__class__)
                memo[id(self)] = result
                result.__dict__ = deepcopy(self.__dict__, memo)
                return result

        def check_deepcopy(m1: nn.Module, m2: nn.Module):
            # Compare the stored originals (not the computed views); fall back
            # to the plain attribute when the tensor is not parametrized.
            w1 = m1.parametrizations.weight.original
            w2 = m2.parametrizations.weight.original
            b1 = (
                m1.parametrizations.bias.original
                if parametrize.is_parametrized(m1, "bias")
                else m1.bias
            )
            b2 = (
                m2.parametrizations.bias.original
                if parametrize.is_parametrized(m2, "bias")
                else m2.bias
            )
            # Weights, biases and attributes should be equal but they must be different objects.
            self.assertEqual(m1.__dict__.keys(), m2.__dict__.keys())
            self.assertIsNot(m1, m2)
            self.assertEqual(w1, w2)
            self.assertIsNot(w1, w2)
            self.assertEqual(b1, b2)
            self.assertIsNot(b1, b2)
            self.assertEqual(m1.attr, m2.attr)
            self.assertIsNot(m1.attr, m2.attr)

        for model in (ModelWithoutDeepcopy(), ActualModel()):
            # General check that we are able to create deepcopy.
            parametrize.register_parametrization(model, "weight", AddOne())
            check_deepcopy(model, deepcopy(model))
            # Check that this works on models with several parametrized tensors.
            parametrize.register_parametrization(model, "bias", AddOne())
            check_deepcopy(model, deepcopy(model))
            # Check that this works on models where tensors have more than one parametrization.
            parametrize.register_parametrization(model, "weight", AddOne())
            check_deepcopy(model, deepcopy(model))
    @swap([True, False])
    def test_transfer_parametrizations_and_params(self):
        r"""Test that all parametrizations and their associated parameters are transferred."""

        class AddOne(nn.Module):
            def forward(self, x):
                return x + 1.0

        class Double(nn.Module):
            def forward(self, x):
                return 2.0 * x

            def right_inverse(self, x):
                return 0.5 * x

        class MinusOne(nn.Module):
            def forward(self, x):
                return x - 1.0

        model = nn.Linear(5, 5)
        # Stack three parametrizations on the same tensor.
        parametrize.register_parametrization(model, "weight", AddOne())
        parametrize.register_parametrization(model, "weight", Double())
        parametrize.register_parametrization(model, "weight", MinusOne())
        hold_weight = model.weight

        to_model = torch.ao.nn.qat.Linear(
            5, 5, qconfig=torch.ao.quantization.get_default_qconfig()
        )
        parametrize.transfer_parametrizations_and_params(model, to_model)

        # checks that final and original value are correct and the to_model is parametrized
        self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
        self.assertEqual(model.weight, to_model.weight)
        self.assertEqual(
            model.parametrizations.weight.original,
            to_model.parametrizations.weight.original,
        )

        # check that the transfer didn't affect the original value
        self.assertEqual(hold_weight, model.weight)
        if get_swap_module_params_on_conversion():
            # When using the swap_tensors path, this is needed so that the autograd
            # graph is not alive anymore.
            del hold_weight

        # testing that changes to one set of parametrizations do not affect the other
        parametrize.remove_parametrizations(to_model, "weight")
        self.assertFalse(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
        self.assertTrue(torch.nn.utils.parametrize.is_parametrized(model, "weight"))

        # also test that parameters that don't exist in to_model get transferred
        model.test_param = Parameter(torch.randn(5, 5))

        self.assertTrue(not hasattr(to_model, "test_param"))
        parametrize.register_parametrization(model, "test_param", Double())
        hold_test_param = model.test_param
        parametrize.transfer_parametrizations_and_params(model, to_model, "test_param")

        # check that previously missing params got transferred correctly
        self.assertEqual(model.test_param, to_model.test_param)
        self.assertEqual(
            model.parametrizations.test_param.original,
            to_model.parametrizations.test_param.original,
        )

        # check that the new transfer didn't change the value for the from_module
        self.assertEqual(hold_test_param, model.test_param)
@swap([True, False])
def test_transfer_parametrizations_and_params_right_inverse(self):
r"""Test that all parametrizations and their associated parameters are transferred."""
class Double(nn.Module):
def forward(self, x):
return 2.0 * x
def right_inverse(self, x):
return 0.5 * x
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", Double())
hold_weight = model.weight
to_model = torch.ao.nn.qat.Linear(
5, 5, qconfig=torch.ao.quantization.get_default_qconfig()
)
parametrize.transfer_parametrizations_and_params(model, to_model)
# check that transfer occurs successfully
self.assertEqual(model.weight, to_model.weight)
self.assertEqual(
model.parametrizations.weight.original,
to_model.parametrizations.weight.original,
)
# check that transfer doesn't affect the from_model weight
self.assertEqual(hold_weight, model.weight)
@swap([True, False])
def test_transfer_parametrizations_and_params_single_param(self):
r"""Test that all parametrizations and their associated parameters are transferred."""
class AddOne(nn.Module):
def forward(self, x):
return x + 1.0
class Double(nn.Module):
def forward(self, x):
return 2.0 * x
class MinusOne(nn.Module):
def forward(self, x):
return x - 1.0
model = nn.Linear(5, 5, bias=True)
parametrize.register_parametrization(model, "weight", AddOne())
parametrize.register_parametrization(model, "weight", Double())
parametrize.register_parametrization(model, "weight", MinusOne())
parametrize.register_parametrization(model, "bias", AddOne())
parametrize.register_parametrization(model, "bias", Double())
parametrize.register_parametrization(model, "bias", MinusOne())
to_model = torch.ao.nn.qat.Linear(
5, 5, bias=True, qconfig=torch.ao.quantization.get_default_qconfig()
)
parametrize.transfer_parametrizations_and_params(model, to_model, "weight")
# check that weight and only weight was transferred
self.assertEqual(model.weight, to_model.weight)
self.assertEqual(
model.parametrizations.weight.original,
to_model.parametrizations.weight.original,
)
self.assertTrue("bias" not in to_model.parametrizations)
# FIXME: Rewrite this test using functions not depending on LAPACK
# and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    @swap([True, False])
    def test_transfer_parametrizations_and_params_many_to_one(self):
        """Transfer of a multi-input parametrization: both stored originals
        (``original0``/``original1``) must be copied, and later registrations
        on the source must not leak into the target.
        """

        # A parametrization with several outputs
        class RankOne(nn.Module):
            def forward(self, x, y):
                # Form a rank-1 matrix from a pair of vectors
                return x.unsqueeze(-1) @ y.unsqueeze(-2)

            def right_inverse(self, Y):
                # We project the given matrix onto the rank 1 matrices
                U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
                # S is ordered in a decreasing way.
                s0_sqrt = S[0].sqrt().unsqueeze(-1)
                return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt

        class Double(nn.Module):
            def forward(self, x):
                return 2.0 * x

        model = nn.Linear(3, 3)
        parametrize.register_parametrization(model, "weight", RankOne())
        parametrize.register_parametrization(model, "weight", Double())
        hold_weight = model.weight

        to_model = torch.ao.nn.qat.Linear(
            3, 3, qconfig=torch.ao.quantization.get_default_qconfig()
        )
        parametrize.transfer_parametrizations_and_params(model, to_model)

        # checks that final and original value are correct and the to_model is parametrized
        self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, "weight"))
        self.assertEqual(model.weight, to_model.weight)
        self.assertEqual(
            model.parametrizations.weight.original0,
            to_model.parametrizations.weight.original0,
        )
        self.assertEqual(
            model.parametrizations.weight.original1,
            to_model.parametrizations.weight.original1,
        )

        # check that the transfer didn't affect the original value
        self.assertEqual(hold_weight, model.weight)

        # testing that changes to one set of parametrizations do not affect the other
        model.test_param = Parameter(torch.randn(3, 3))

        self.assertTrue(not hasattr(to_model, "test_param"))
        parametrize.register_parametrization(model, "test_param", RankOne())
        hold_test_param = model.test_param
        parametrize.transfer_parametrizations_and_params(model, to_model, "test_param")

        # also check that previously missing params got transferred correctly
        self.assertEqual(model.test_param, to_model.test_param)
        self.assertEqual(
            model.parametrizations.test_param.original0,
            to_model.parametrizations.test_param.original0,
        )
        self.assertEqual(
            model.parametrizations.test_param.original1,
            to_model.parametrizations.test_param.original1,
        )

        # check that the new transfer didn't change the value for the from_module
        self.assertEqual(hold_test_param, model.test_param)
    @swap([True, False])
    def test_new_spectral_norm(self):
        """End-to-end checks for the parametrization-based spectral_norm:
        buffer/parameter layout, repeated registration, removal, training vs
        eval behavior of the power iteration, and gradcheck through multiple
        forwards (optionally under DataParallel when multi-GPU is available).
        """
        with set_default_dtype(torch.double):
            input = torch.randn(3, 5)
            m = nn.Linear(5, 7)
            m = torch.nn.utils.parametrizations.spectral_norm(m)
            spectral_norm_m = m.parametrizations.weight[0]

            # _u has one entry per output row of the weight.
            self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))

            # .parametrizations.weight.original should be trainable
            self.assertTrue(hasattr(m.parametrizations.weight, "original"))
            self.assertTrue("original" in m.parametrizations.weight._parameters)

            # u should be just a reused buffer
            self.assertTrue(hasattr(spectral_norm_m, "_u"))
            self.assertTrue("_u" in spectral_norm_m._buffers)
            self.assertTrue("_v" in spectral_norm_m._buffers)

            # weight should be a plain attribute, not counted as a buffer or a param
            self.assertIsNotNone(m.weight)
            self.assertFalse("weight" in m._buffers)
            self.assertFalse("weight" in m._parameters)

            # it should also be sharing storage as `weight_orig`
            # self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())
            self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())
            self.assertEqual(
                m.parametrizations.weight.original.stride(), m.weight.stride()
            )

            m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight")

            # spectral_norm is the only parametrization
            self.assertFalse(hasattr(m, "parametrizations"))
            self.assertTrue("weight" in m._parameters)

            # We can register spectral_norm multiple times on the same parameter
            # and on multiple parameters in the same module
            m = torch.nn.utils.parametrizations.spectral_norm(m, "weight")
            m = torch.nn.utils.parametrizations.spectral_norm(m, "weight")
            m = torch.nn.utils.parametrizations.spectral_norm(m, "bias")

            # If we remove the parametrization on bias, weight is still parametrized
            # Removing a parametrization runs forward in eval mode if leave_parametrized=True
            m = torch.nn.utils.parametrize.remove_parametrizations(m, "bias")
            self.assertTrue("bias" in m._parameters)
            self.assertTrue(hasattr(m, "parametrizations"))
            self.assertFalse("weight" in m._parameters)

            m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
            # Neither weight and bias are parametrized
            self.assertFalse(hasattr(m, "parametrizations"))
            self.assertTrue("weight" in m._parameters)
            self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))

            # test correctness in training/eval modes and cpu/multi-gpu settings
            for apply_dp in (True, False):
                if apply_dp:
                    if not TEST_MULTIGPU:
                        continue
                    device = torch.device("cuda:0")

                    def maybe_wrap(m):
                        return torch.nn.DataParallel(m, [0, 1])

                else:
                    device = torch.device("cpu")

                    def maybe_wrap(m):
                        return m

                for requires_grad in (True, False):
                    # Fresh modules per configuration so power-iteration state
                    # does not leak between sub-cases.
                    def get_modules():
                        m = nn.Linear(3, 4).to(device)
                        m.weight.requires_grad_(requires_grad)
                        m = torch.nn.utils.parametrizations.spectral_norm(m)
                        wrapped_m = maybe_wrap(m)
                        spectral_norm_m = m.parametrizations.weight[0]
                        return m, wrapped_m, spectral_norm_m

                    input = torch.randn(2, 3, device=device)

                    m, wrapped_m, spectral_norm_m = get_modules()

                    self.assertTrue(hasattr(spectral_norm_m, "_u"))
                    u0 = spectral_norm_m._u.clone()
                    v0 = spectral_norm_m._v.clone()

                    # TEST TRAINING BEHAVIOR

                    # We perform GD first to modify the initial matrix
                    opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)

                    opt.zero_grad()
                    wrapped_m(input).sum().backward()
                    opt.step()

                    out = wrapped_m(input)
                    if requires_grad:
                        # run forward again and assert that u and v are updated
                        self.assertNotEqual(u0, spectral_norm_m._u)
                        self.assertNotEqual(v0, spectral_norm_m._v)

                    # assert that backprop reaches original weight
                    # can't use gradcheck because the function changes as we
                    # activate through it in training mode
                    if requires_grad:
                        torch.autograd.grad(
                            out.sum(), m.parametrizations.weight.original
                        )

                    # test backward works with multiple forwards
                    # it uses training mode so we need to reset `u` and `v` vectors
                    # to same value at beginning for finite difference test to pass
                    saved_u = spectral_norm_m._u.clone()
                    saved_v = spectral_norm_m._v.clone()

                    def fn(input):
                        # Restore u/v so every evaluation starts from the same
                        # power-iteration state (finite differences need this).
                        spectral_norm_m._u.data.copy_(saved_u)
                        spectral_norm_m._v.data.copy_(saved_v)
                        out0 = wrapped_m(input)
                        out1 = wrapped_m(input)
                        return out0 + out1

                    # Make sure we can compute gradients wrt to all the parameters in the case
                    # of double forward
                    fn(input.clone().requires_grad_()).sum().backward()
                    gradcheck(
                        fn, (input.clone().requires_grad_(),), check_batched_grad=False
                    )

                    # test removing
                    # spectral norm module needs to be in eval mode if we'd like to
                    # avoid doing another power iteration
                    m, wrapped_m, _ = get_modules()
                    pre_remove_out = wrapped_m(input)
                    if get_swap_module_params_on_conversion():
                        # When using the swap_tensors path, this is needed so that the autograd
                        # graph is not alive anymore.
                        pre_remove_out_ref = pre_remove_out.detach()
                        del pre_remove_out
                    else:
                        pre_remove_out_ref = pre_remove_out
                    m.eval()
                    m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
                    self.assertEqual(wrapped_m(input), pre_remove_out_ref)

                    torch.nn.utils.parametrizations.spectral_norm(m)

                    for _ in range(3):
                        pre_remove_out = wrapped_m(input)
                        if get_swap_module_params_on_conversion():
                            # When using the swap_tensors path, this is needed so that the autograd
                            # graph is not alive anymore.
                            pre_remove_out_ref = pre_remove_out.detach()
                            del pre_remove_out
                        else:
                            pre_remove_out_ref = pre_remove_out
                    m.eval()
                    m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
                    self.assertEqual(wrapped_m(input), pre_remove_out_ref)

                    # TEST EVAL BEHAVIOR
                    m, wrapped_m, spectral_norm_m = get_modules()
                    wrapped_m(input)
                    last_train_out = wrapped_m(input)
                    last_train_u = spectral_norm_m._u.clone()
                    last_train_v = spectral_norm_m._v.clone()
                    wrapped_m.zero_grad()
                    wrapped_m.eval()

                    eval_out0 = wrapped_m(input)
                    # assert eval gives same result as last training iteration
                    self.assertEqual(eval_out0, last_train_out)
                    # assert doing more iteration in eval don't change things
                    self.assertEqual(eval_out0, wrapped_m(input))
                    self.assertEqual(last_train_u, spectral_norm_m._u)
                    self.assertEqual(last_train_v, spectral_norm_m._v)

                    # FIXME: the code below is flaky when executed with DataParallel
                    # see https://github.com/pytorch/pytorch/issues/13818
                    if apply_dp:
                        continue

                    # test backward works with multiple forwards in mixed training
                    # and eval modes
                    # it uses training mode so we need to reset `u` and `v` vectors
                    # to same value at beginning for finite difference test to pass
                    saved_u = spectral_norm_m._u.clone()
                    saved_v = spectral_norm_m._v.clone()

                    def fn(input):
                        spectral_norm_m._u.data.copy_(saved_u)
                        spectral_norm_m._v.data.copy_(saved_v)
                        wrapped_m.train()
                        out0 = wrapped_m(input)
                        wrapped_m.eval()
                        out1 = wrapped_m(input)
                        wrapped_m.train()
                        out2 = wrapped_m(input)
                        wrapped_m.eval()
                        out3 = wrapped_m(input)
                        return out0 + out1 + out2 + out3

                    gradcheck(fn, (input.clone().requires_grad_(),))

                    # assert that backprop reaches weight_orig in eval
                    if requires_grad:

                        def fn(weight):
                            return wrapped_m(input)

                        gradcheck(fn, (m.parametrizations.weight.original,))
def test_register_parametrization_no_grad(self):
r"""Test that it is possible to register a parametrization without gradient"""
class SplitAndCat(nn.Module):
def right_inverse(self, x):
# split the tensor in two halves
return torch.split(x, x.shape[1] // 2)
def forward(self, x0, x1):
return torch.cat([x0, x1])
model = nn.Linear(8, 8)
model.weight.requires_grad = False
parametrize.register_parametrization(model, "weight", SplitAndCat())
# making sure the parameterized and decomposed Tensors both have requires_grad == False
self.assertFalse(model.weight.requires_grad)
self.assertFalse(model.parametrizations.weight.original0.requires_grad)
self.assertFalse(model.parametrizations.weight.original1.requires_grad)
    @swap([True, False])
    def test_new_spectral_norm_load_state_dict(self):
        """state_dict round-trips for a spectral-norm-parametrized Linear:
        key layout, strict vs non-strict loading as keys are progressively
        removed, and that re-wrapping then loading reproduces the same
        outputs in both train and eval mode.
        """
        # activate_times controls how many power iterations ran before saving.
        for activate_times in (0, 3):
            inp = torch.randn(2, 3)
            m = nn.Linear(3, 5)
            snm = torch.nn.utils.parametrizations.spectral_norm(m)
            snm.train()

            for _ in range(activate_times):
                snm(inp)

            state_dict = deepcopy(snm.state_dict())
            self.assertEqual(
                {
                    "parametrizations.weight.original",
                    "bias",
                    "parametrizations.weight.0._v",
                    "parametrizations.weight.0._u",
                },
                set(state_dict.keys()),
            )

            # test that non-strict loading works
            non_strict_state_dict = deepcopy(state_dict)
            non_strict_state_dict["nonsense"] = "nonsense"
            with self.assertRaisesRegex(
                RuntimeError, r'Unexpected key\(s\) in state_dict: "nonsense"'
            ):
                snm.load_state_dict(non_strict_state_dict, strict=True)
            snm.load_state_dict(non_strict_state_dict, strict=False)
            # Progressively drop keys; non-strict loading must tolerate each.
            del non_strict_state_dict["parametrizations.weight.original"]
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict["parametrizations.weight.0._u"]
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict["parametrizations.weight.0._v"]
            snm.load_state_dict(non_strict_state_dict, strict=False)
            non_strict_state_dict["weight"] = (
                snm.weight.detach().clone()
            )  # set W as a buffer
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict._metadata[
                "parametrizations.weight.0"
            ]  # remove metadata info
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict["weight"]  # remove W buffer
            snm.load_state_dict(non_strict_state_dict, strict=False)
            del non_strict_state_dict["bias"]
            snm.load_state_dict(non_strict_state_dict, strict=False)

            # normal state_dict
            # test that re-wrapping does not matter
            m = torch.nn.utils.parametrize.remove_parametrizations(snm, "weight")
            snm = torch.nn.utils.parametrizations.spectral_norm(m)

            snm.load_state_dict(state_dict)
            with torch.no_grad():
                snm.eval()
                out0_eval = snm(inp)
                snm.train()
                # Two train-mode forwards advance the power iteration.
                out1_train = snm(inp)
                out2_train = snm(inp)
                snm.eval()
                out3_eval = snm(inp)

            # test that re-wrapping does not matter
            m = torch.nn.utils.parametrize.remove_parametrizations(snm, "weight")
            snm = torch.nn.utils.parametrizations.spectral_norm(m)

            # Test normal loading
            snm.load_state_dict(state_dict)
            with torch.no_grad():
                snm.eval()
                self.assertEqual(out0_eval, snm(inp))
                snm.train()
                self.assertEqual(out1_train, snm(inp))
                self.assertEqual(out2_train, snm(inp))
                snm.eval()
                self.assertEqual(out3_eval, snm(inp))
@swap([True, False])
def test_new_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.parametrizations.spectral_norm(m)
snm = m.parametrizations.weight[0]
# this should not run into incompatible shapes
m(inp)
# check that u refers to the same dimension
self.assertEqual(
snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape
)
    @swap([True, False])
    def test_new_spectral_norm_forward(self):
        """The parametrized forward must match a hand-rolled reference: one
        power-iteration step on u/v followed by division of the weight by its
        estimated spectral norm.
        """
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.parametrizations.spectral_norm(m)
        snm = m.parametrizations.weight[0]
        # naive forward: replicate the power iteration by hand
        _weight = m.parametrizations.weight.original
        _bias, _v = m.bias, snm._v
        _weight_mat = _weight.view(_weight.size(0), -1)
        # One power-iteration step: u <- normalize(W v), v <- normalize(W^T u)
        _u = torch.mv(_weight_mat, _v)
        _u = F.normalize(_u, dim=0, eps=1e-12)
        _v = torch.mv(_weight_mat.t(), _u)
        _v = F.normalize(_v, dim=0, eps=1e-12)
        # sigma = u^T W v is the spectral-norm estimate; rescale the weight by it.
        _weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
        out_hat = torch.nn.functional.linear(input, _weight, _bias)
        expect_out = m(input)
        self.assertEqual(expect_out, out_hat)
@swap([True, False])
@skipIfTorchDynamo("Test does not work with TorchDynamo")
def test_new_spectral_norm_value(self):
# a test that the spectral norm (= top singular value)
# is in fact properly calculated, using example of a simple diagonal matrix.
for dtype in (torch.float, torch.cfloat):
m = nn.Linear(2, 2, dtype=dtype)
with torch.no_grad():
# set weight to be diagonal
x = torch.diagonal(m.weight)
m.weight = nn.Parameter(torch.diag(x))
torch.nn.utils.parametrizations.spectral_norm(m)
# weights should be rescaled by spectral norm, (i.e., largest diagonal element in norm)
expected = torch.diag(x / x.abs().max())
self.assertEqual(m.weight.data, expected)
@skipIfNoLapack
@swap([True, False])
def test_orthogonal_parametrization(self):
# Orthogonal implements 6 algorithms (3x parametrizations times 2 options of use_trivialization)
def assert_is_orthogonal(X):
n, k = X.size(-2), X.size(-1)
if n < k:
X = X.mT
n, k = k, n
Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(
*(X.size()[:-2]), k, k
)
eps = 10 * n * torch.finfo(X.dtype).eps
torch.testing.assert_close(X.mH @ X, Id, atol=eps, rtol=0.0)
def assert_weight_allclose_Q(weight, W):
# Test that weight is equal to the Q part of the QR decomposition of W
# (or of its transpose if the matrix is wide)
wide_matrix = W.size(-2) < W.size(-1)
if wide_matrix:
W = W.mT
Q, R = torch.linalg.qr(W)
Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
if wide_matrix:
Q = Q.mT
torch.testing.assert_close(Q, weight, atol=1e-5, rtol=0.0)
for shape, dtype, use_linear in product(
((4, 4), (5, 3), (3, 5)), # square/ tall / wide
(torch.float32, torch.complex64),
(True, False),
):
# Conv2d does not support complex yet
if not use_linear:
continue
if use_linear:
input = torch.randn(3, shape[0], dtype=dtype)
else:
input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)
for parametrization, use_trivialization in product(
("matrix_exp", "cayley", "householder"), (False, True)
):
# right_inverse for Cayley and matrix_exp not implemented for use_trivialization=False
# See Note [right_inverse expm cayley]
can_initialize = use_trivialization or parametrization == "householder"
# We generate them every time to always start with fresh weights
if use_linear:
m = nn.Linear(*shape, dtype=dtype)
else:
m = nn.Conv2d(2, 3, shape, dtype=dtype)
# We do not support householder for complex inputs
# See Note [Householder complex]
# When using the swap_tensors path, this is needed so that the autograd
# graph is not alive anymore.
if get_swap_module_params_on_conversion():
w_init = m.weight.detach().clone()
else:
w_init = m.weight.clone()
if parametrization == "householder" and m.weight.is_complex():
msg = "householder parametrization does not support complex tensors"
with self.assertRaisesRegex(ValueError, msg):
torch.nn.utils.parametrizations.orthogonal(
m,
"weight",
parametrization,
use_trivialization=use_trivialization,
)
continue
wide_matrix = w_init.size(-2) < w_init.size(-1)
torch.nn.utils.parametrizations.orthogonal(
m, "weight", parametrization, use_trivialization=use_trivialization
)
# Forwards works as expected
self.assertEqual(w_init.shape, m.weight.shape)
assert_is_orthogonal(m.weight)
if can_initialize:
assert_weight_allclose_Q(m.weight, w_init)
# Initializing with a given orthogonal matrix works
X = torch.randn_like(m.weight)
if wide_matrix:
X = X.mT
w_new = torch.linalg.qr(X).Q
if wide_matrix:
w_new = w_new.mT
if can_initialize:
m.weight = w_new
torch.testing.assert_close(w_new, m.weight, atol=1e-5, rtol=0.0)
else:
msg = (
"assign to the matrix exponential or the Cayley parametrization"
)
with self.assertRaisesRegex(NotImplementedError, msg):
m.weight = w_new
# Initializing with a non-orthogonal matrix makes m.weight be the Q part of the given matrix
w_new = torch.randn_like(m.weight)
if can_initialize:
m.weight = w_new
assert_weight_allclose_Q(m.weight, w_new)
else:
msg = (
"assign to the matrix exponential or the Cayley parametrization"
)
with self.assertRaisesRegex(NotImplementedError, msg):
m.weight = w_new
opt = torch.optim.SGD(m.parameters(), lr=0.1)
for _ in range(2):
opt.zero_grad()
m(input).norm().backward()
grad = m.parametrizations.weight.original.grad
self.assertIsNotNone(grad)
# We do not update the upper triangular part of the matrix if tall tril if wide
if grad.size(-2) >= grad.size(-1):
zeros_grad = grad.triu(1)
else:
zeros_grad = grad.tril(-1)
self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
# The gradient in the diagonal can only be imaginary because a skew-Hermitian
# matrix has imaginary diagonal
diag_grad = grad.diagonal(dim1=-2, dim2=-1)
if grad.is_complex():
diag_grad = diag_grad.real
self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
opt.step()
assert_is_orthogonal(m.weight)
@skipIfNoLapack
@swap([True, False])
def test_orthogonal_errors(self):
m = nn.Linear(3, 4)
with self.assertRaisesRegex(ValueError, "has to be one of"):
torch.nn.utils.parametrizations.orthogonal(m, "weight", "foo")
with self.assertRaisesRegex(ValueError, "Expected a matrix"):
torch.nn.utils.parametrizations.orthogonal(m, "bias")
torch.nn.utils.parametrizations.orthogonal(m, "weight")
with self.assertRaisesRegex(ValueError, "matrices of shape"):
m.weight = torch.randn(5, 5)
torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
@swap([True, False])
def test_weight_norm_state_dict_compat(self):
m = nn.Linear(4, 5)
m = torch.nn.utils.weight_norm(m)
old_dict = m.state_dict()
m2 = nn.Linear(4, 5)
m2 = torch.nn.utils.parametrizations.weight_norm(m2)
m2.load_state_dict(old_dict)
input = torch.randn(3, 4)
self.assertEqual(m(input), m2(input))
@swap([True, False])
def test_weight_norm_pickle(self):
m = nn.Linear(4, 5)
m = torch.nn.utils.parametrizations.weight_norm(m)
with self.assertRaisesRegex(RuntimeError, "state_dict"):
pickle.dumps(m)
@swap([True, False])
def test_weight_norm_deepcopy(self):
m = nn.Linear(4, 5)
m = torch.nn.utils.parametrizations.weight_norm(m)
m2 = deepcopy(m)
input = torch.randn(3, 4)
self.assertEqual(m(input), m2(input))
@swap([True])
def test_wrapper_subclass_parametrization(self):
class Subclassify(nn.Module):
def forward(self, X):
return TwoTensor(X, X)
class UnSubclassify(nn.Module):
def forward(self, X):
return X.a
class IdentityWithRightInverse(nn.Module):
def forward(self, X):
return X
def right_inverse(self, X):
return TwoTensor(X, X)
def _check_parametrization(
parametrization,
type_before_registration,
type_after_registration,
leave_parametrized=False,
type_after_right_inverse=None,
):
model = nn.Linear(2, 2)
buf = torch.randn(2, 2)
model.buf = torch.nn.Buffer(buf)
if (
type_before_registration == TwoTensor
and type_after_registration == Tensor
):
model._apply(lambda t: TwoTensor(t, t))
initial_weight = model.weight.detach().clone()
initial_weight_id = id(model.weight)
initial_buf = model.buf.detach().clone()
initial_buf_id = id(model.buf)
type_original_weight = (
type_before_registration
if type_after_right_inverse is None
else type_after_right_inverse
)
type_original_buf = (
Tensor if type_original_weight is nn.Parameter else type_original_weight
)
type_after_removal_buf = (
type_after_registration if leave_parametrized else type_original_buf
)
if leave_parametrized:
if type_after_registration is Tensor:
type_after_removal_weight = nn.Parameter
else:
type_after_removal_weight = type_after_registration
else:
type_after_removal_weight = type_original_weight
parametrize.register_parametrization(model, "weight", parametrization())
parametrize.register_parametrization(model, "buf", parametrization())
self.assertTrue(hasattr(model, "parametrizations"))
self.assertTrue(parametrize.is_parametrized(model))
self.assertFalse(parametrize.is_parametrized(model, "bias"))
# checks for weight
self.assertTrue(parametrize.is_parametrized(model, "weight"))
self.assertTrue(
isinstance(model.parametrizations.weight.original, nn.Parameter)
)
self.assertTrue(
type(model.parametrizations.weight.original) is type_original_weight
)
self.assertNotIn("weight", model._parameters)
self.assertTrue(type(model.weight) is type_after_registration)
# checks for buf
self.assertTrue(parametrize.is_parametrized(model, "buf"))
self.assertFalse(
isinstance(model.parametrizations.buf.original, nn.Parameter)
)
self.assertTrue(
type(model.parametrizations.buf.original) is type_original_buf
)
self.assertTrue(type(model.buf) is type_after_registration)
parametrize.remove_parametrizations(
model, "weight", leave_parametrized=leave_parametrized
)
parametrize.remove_parametrizations(
model, "buf", leave_parametrized=leave_parametrized
)
self.assertFalse(hasattr(model, "parametrizations"))
self.assertEqual(model.__class__, nn.Linear)
# checks for weight
self.assertTrue(type(model.weight) is type_after_removal_weight)
self.assertTrue(isinstance(model.weight, nn.Parameter))
self.assertEqual(id(model.weight), initial_weight_id)
# checks for buf
self.assertTrue(type(model.buf) is type_after_removal_buf)
self.assertFalse(isinstance(model.buf, nn.Parameter))
self.assertEqual(id(model.buf), initial_buf_id)
if not leave_parametrized and type_after_right_inverse is None:
self.assertEqual(model.weight, initial_weight)
self.assertEqual(model.buf, initial_buf)
_check_parametrization(Subclassify, nn.Parameter, TwoTensor)
_check_parametrization(UnSubclassify, TwoTensor, Tensor)
_check_parametrization(
IdentityWithRightInverse,
nn.Parameter,
TwoTensor,
type_after_right_inverse=TwoTensor,
)
_check_parametrization(
Subclassify, nn.Parameter, TwoTensor, leave_parametrized=True
)
_check_parametrization(
UnSubclassify, TwoTensor, Tensor, leave_parametrized=True
)
_check_parametrization(
IdentityWithRightInverse,
nn.Parameter,
TwoTensor,
leave_parametrized=True,
type_after_right_inverse=TwoTensor,
)
| TestNNParametrization |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol30.py | {
"start": 373,
"end": 502
} | class ____(Protocol):
v1: int
def func2(c2: C2):
# This should generate an error because v1 is invariant.
x: P2 = c2
| C2 |
python | pandas-dev__pandas | pandas/tests/io/test_feather.py | {
"start": 508,
"end": 10407
} | class ____:
def check_error_on_write(self, df, exc, err_msg, temp_file):
# check that we are raising the exception
# on writing
with pytest.raises(exc, match=err_msg):
to_feather(df, temp_file)
def check_external_error_on_write(self, df, temp_file):
# check that we are raising the exception
# on writing
with tm.external_error_raised(Exception):
to_feather(df, temp_file)
def check_round_trip(
self, df, temp_file, expected=None, write_kwargs=None, **read_kwargs
):
if write_kwargs is None:
write_kwargs = {}
if expected is None:
expected = df.copy()
to_feather(df, temp_file, **write_kwargs)
result = read_feather(temp_file, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self, temp_file):
msg = "feather only support IO with DataFrames"
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg, temp_file)
def test_basic(self, temp_file):
tz = zoneinfo.ZoneInfo("US/Eastern")
df = pd.DataFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": pd.Categorical(list("abc")),
"dt": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3)), freq=None
),
"dttz": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, tz=tz)),
freq=None,
),
"dt_with_null": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
"dtns": pd.DatetimeIndex(
list(pd.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
df["periods"] = pd.period_range("2013", freq="M", periods=3)
df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
df["intervals"] = pd.interval_range(0, 3, 3)
assert df.dttz.dtype.tz.key == "US/Eastern"
expected = df.copy()
expected.loc[1, "bool_with_null"] = None
self.check_round_trip(df, temp_file, expected=expected)
def test_duplicate_columns(self, temp_file):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_external_error_on_write(df, temp_file)
def test_read_columns(self, temp_file):
# GH 24025
df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_round_trip(df, temp_file, expected=df[columns], columns=columns)
def test_read_columns_different_order(self, temp_file):
# GH 33878
df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
expected = df[["B", "A"]]
self.check_round_trip(df, temp_file, expected, columns=["B", "A"])
def test_unsupported_other(self, temp_file):
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(df, temp_file)
def test_rw_use_threads(self, temp_file):
df = pd.DataFrame({"A": np.arange(100000)})
self.check_round_trip(df, temp_file, use_threads=True)
self.check_round_trip(df, temp_file, use_threads=False)
def test_path_pathlib(self):
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
).reset_index()
result = tm.round_trip_pathlib(df.to_feather, read_feather)
tm.assert_frame_equal(df, result)
def test_passthrough_keywords(self, temp_file):
df = pd.DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=pd.Index(list("ABCD")),
index=pd.Index([f"i-{i}" for i in range(30)]),
).reset_index()
self.check_round_trip(df, temp_file, write_kwargs={"version": 1})
@pytest.mark.network
@pytest.mark.single_cpu
def test_http_path(self, feather_file, httpserver):
# GH 29055
expected = read_feather(feather_file)
with open(feather_file, "rb") as f:
httpserver.serve_content(content=f.read())
res = read_feather(httpserver.url)
tm.assert_frame_equal(expected, res)
def test_read_feather_dtype_backend(
self, string_storage, dtype_backend, using_infer_string, temp_file
):
# GH#50765
df = pd.DataFrame(
{
"a": pd.Series([1, pd.NA, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="Int64"),
"c": pd.Series([1.5, pd.NA, 2.5], dtype="Float64"),
"d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"),
"e": [True, False, None],
"f": [True, False, True],
"g": ["a", "b", "c"],
"h": ["a", "b", None],
}
)
to_feather(df, temp_file)
with pd.option_context("mode.string_storage", string_storage):
result = read_feather(temp_file, dtype_backend=dtype_backend)
if dtype_backend == "pyarrow":
pa = pytest.importorskip("pyarrow")
if using_infer_string:
string_dtype = pd.ArrowDtype(pa.large_string())
else:
string_dtype = pd.ArrowDtype(pa.string())
else:
string_dtype = pd.StringDtype(string_storage)
expected = pd.DataFrame(
{
"a": pd.Series([1, pd.NA, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="Int64"),
"c": pd.Series([1.5, pd.NA, 2.5], dtype="Float64"),
"d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"),
"e": pd.Series([True, False, pd.NA], dtype="boolean"),
"f": pd.Series([True, False, True], dtype="boolean"),
"g": pd.Series(["a", "b", "c"], dtype=string_dtype),
"h": pd.Series(["a", "b", None], dtype=string_dtype),
}
)
if dtype_backend == "pyarrow":
from pandas.arrays import ArrowExtensionArray
expected = pd.DataFrame(
{
col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
for col in expected.columns
}
)
if using_infer_string:
expected.columns = expected.columns.astype(
pd.StringDtype(string_storage, na_value=np.nan)
)
tm.assert_frame_equal(result, expected)
def test_int_columns_and_index(self, temp_file):
df = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index([3, 4, 5], name="test"))
self.check_round_trip(df, temp_file)
def test_invalid_dtype_backend(self, temp_file):
msg = (
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
"'pyarrow' are allowed."
)
df = pd.DataFrame({"int": list(range(1, 4))})
df.to_feather(temp_file)
with pytest.raises(ValueError, match=msg):
read_feather(temp_file, dtype_backend="numpy")
def test_string_inference(self, tmp_path, using_infer_string):
# GH#54431
path = tmp_path / "test_string_inference.p"
df = pd.DataFrame(data={"a": ["x", "y"]})
df.to_feather(path)
with pd.option_context("future.infer_string", True):
result = read_feather(path)
dtype = pd.StringDtype(na_value=np.nan)
expected = pd.DataFrame(
data={"a": ["x", "y"]}, dtype=pd.StringDtype(na_value=np.nan)
)
expected = pd.DataFrame(
data={"a": ["x", "y"]},
dtype=dtype,
columns=pd.Index(
["a"],
dtype=object
if pa_version_under19p0 and not using_infer_string
else dtype,
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa_version_under18p0, reason="not supported before 18.0")
def test_string_inference_string_view_type(self, tmp_path):
# GH#54798
import pyarrow as pa
from pyarrow import feather
path = tmp_path / "string_view.parquet"
table = pa.table({"a": pa.array([None, "b", "c"], pa.string_view())})
feather.write_feather(table, path)
with pd.option_context("future.infer_string", True):
result = read_feather(path)
expected = pd.DataFrame(
data={"a": [None, "b", "c"]}, dtype=pd.StringDtype(na_value=np.nan)
)
tm.assert_frame_equal(result, expected)
def test_out_of_bounds_datetime_to_feather(self, temp_file):
# GH#47832
df = pd.DataFrame(
{
"date": [
datetime.fromisoformat("1654-01-01"),
datetime.fromisoformat("1920-01-01"),
],
}
)
self.check_round_trip(df, temp_file)
| TestFeather |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/detector/test_stateful.py | {
"start": 18498,
"end": 21226
} | class ____(TestCase):
def setUp(self) -> None:
self.detector = self.create_detector(
name="Redis Optimization Detector",
project=self.project,
)
self.handler = MockDetectorStateHandler(
detector=self.detector,
thresholds={
Level.LOW: 2,
Level.HIGH: 3,
},
)
self.group_keys = [None, "group1", "group2"]
def test_get_state_data_uses_single_redis_pipeline(self) -> None:
"""
Test that get_state_data uses only 1 Redis pipeline operation.
"""
with mock.patch(
"sentry.workflow_engine.handlers.detector.stateful.get_redis_client"
) as mock_redis:
mock_pipeline = mock.Mock()
mock_redis.return_value.pipeline.return_value = mock_pipeline
mock_pipeline.execute.return_value = ["0", "1", "2", "3", "4", "5"] # Mock values
# Call get_state_data
self.handler.state_manager.get_state_data(self.group_keys)
# Verify pipeline was created only once
mock_redis.return_value.pipeline.assert_called_once()
# Verify pipeline.execute was called only once
mock_pipeline.execute.assert_called_once()
# Verify multiple gets were added to the pipeline
# Should be 3 groups * (1 dedupe + 2 counter keys) = 9 total gets
expected_get_calls = 3 * (1 + len(self.handler.state_manager.counter_names))
assert mock_pipeline.get.call_count == expected_get_calls
def test_redis_key_mapping_generates_correct_keys(self) -> None:
"""
Test that redis key mapping generates the expected keys.
"""
state_manager = self.handler.state_manager
key_mapping = state_manager.get_redis_keys_for_group_keys(self.group_keys)
# Should have dedupe keys for each group
dedupe_keys = [k for k, (_, key_type) in key_mapping.items() if key_type == "dedupe"]
assert len(dedupe_keys) == len(self.group_keys)
# Should have counter keys for each group and counter name
counter_keys = [k for k, (_, key_type) in key_mapping.items() if key_type != "dedupe"]
expected_counter_keys = len(self.group_keys) * len(state_manager.counter_names)
assert len(counter_keys) == expected_counter_keys
def test_bulk_get_redis_values_handles_empty_keys(self) -> None:
"""
Test that bulk_get_redis_values handles empty key list correctly.
"""
state_manager = self.handler.state_manager
result = state_manager.bulk_get_redis_values([])
assert result == {}
| TestDetectorStateManagerRedisOptimization |
python | google__pytype | pytype/rewrite/tests/test_basic.py | {
"start": 189,
"end": 2049
} | class ____(RewriteTest):
"""Basic functional tests."""
def setUp(self):
super().setUp()
self.options.tweak(use_rewrite=True)
def test_analyze_functions(self):
self.Check("""
def f():
def g():
pass
""")
def test_analyze_function_with_nonlocal(self):
self.Check("""
def f():
x = None
def g():
return x
""")
def test_class(self):
self.Check("""
class C:
def __init__(self):
pass
""")
def test_method_side_effect(self):
self.Check("""
class C:
def f(self):
self.x = 3
def g(self):
self.f()
return self.x
""")
def test_infer_stub(self):
ty = self.Infer("""
def f():
def g():
pass
""")
self.assertTypesMatchPytd(ty, """
def f() -> None: ...
""")
def test_assert_type(self):
errors = self.CheckWithErrors("""
assert_type(0, int)
assert_type(0, "int")
assert_type(0, "str") # assert-type[e]
""")
self.assertErrorSequences(errors, {'e': ['Expected: str', 'Actual: int']})
def test_infer_class_body(self):
ty = self.Infer("""
class C:
def __init__(self):
self.x = 3
def f(self):
return self.x
""")
self.assertTypesMatchPytd(ty, """
class C:
x: int
def __init__(self) -> None: ...
def f(self) -> int: ...
""")
def test_inheritance(self):
ty = self.Infer("""
class C:
pass
class D(C):
pass
""")
self.assertTypesMatchPytd(ty, """
class C: ...
class D(C): ...
""")
def test_fstrings(self):
self.Check("""
x = 1
y = 2
z = (
f'x = {x}'
' and '
f'y = {y}'
)
assert_type(z, str)
""")
| BasicTest |
python | networkx__networkx | networkx/algorithms/tests/test_graphical.py | {
"start": 1085,
"end": 5366
} | class ____:
@classmethod
def setup_class(cls):
global atlas
from networkx.generators import atlas
cls.GAG = atlas.graph_atlas_g()
def test_atlas(self):
for graph in self.GAG:
deg = (d for n, d in graph.degree())
assert nx.is_graphical(deg, method="eg")
assert nx.is_graphical(deg, method="hh")
def test_small_graph_true():
z = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
assert nx.is_graphical(z, method="hh")
assert nx.is_graphical(z, method="eg")
z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2]
assert nx.is_graphical(z, method="hh")
assert nx.is_graphical(z, method="eg")
z = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert nx.is_graphical(z, method="hh")
assert nx.is_graphical(z, method="eg")
def test_small_graph_false():
z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
assert not nx.is_graphical(z, method="hh")
assert not nx.is_graphical(z, method="eg")
z = [6, 5, 4, 4, 2, 1, 1, 1]
assert not nx.is_graphical(z, method="hh")
assert not nx.is_graphical(z, method="eg")
z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert not nx.is_graphical(z, method="hh")
assert not nx.is_graphical(z, method="eg")
def test_directed_degree_sequence():
# Test a range of valid directed degree sequences
n, r = 100, 10
p = 1.0 / r
for i in range(r):
G = nx.erdos_renyi_graph(n, p * (i + 1), None, True)
din = (d for n, d in G.in_degree())
dout = (d for n, d in G.out_degree())
assert nx.is_digraphical(din, dout)
def test_small_directed_sequences():
dout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
din = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1]
assert nx.is_digraphical(din, dout)
# Test nongraphical directed sequence
dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102]
assert not nx.is_digraphical(din, dout)
# Test digraphical small sequence
dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
assert nx.is_digraphical(din, dout)
# Test nonmatching sum
din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
assert not nx.is_digraphical(din, dout)
# Test for negative integer in sequence
din = [2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
assert not nx.is_digraphical(din, dout)
# Test for noninteger
din = dout = [1, 1, 1.1, 1]
assert not nx.is_digraphical(din, dout)
din = dout = [1, 1, "rer", 1]
assert not nx.is_digraphical(din, dout)
def test_multi_sequence():
# Test nongraphical multi sequence
seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
assert not nx.is_multigraphical(seq)
# Test small graphical multi sequence
seq = [6, 5, 4, 4, 2, 1, 1, 1]
assert nx.is_multigraphical(seq)
# Test for negative integer in sequence
seq = [6, 5, 4, -4, 2, 1, 1, 1]
assert not nx.is_multigraphical(seq)
# Test for sequence with odd sum
seq = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
assert not nx.is_multigraphical(seq)
# Test for noninteger
seq = [1, 1, 1.1, 1]
assert not nx.is_multigraphical(seq)
seq = [1, 1, "rer", 1]
assert not nx.is_multigraphical(seq)
def test_pseudo_sequence():
# Test small valid pseudo sequence
seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
assert nx.is_pseudographical(seq)
# Test for sequence with odd sum
seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
assert not nx.is_pseudographical(seq)
# Test for negative integer in sequence
seq = [1000, 3, 3, 3, 3, 2, 2, -2, 1, 1]
assert not nx.is_pseudographical(seq)
# Test for noninteger
seq = [1, 1, 1.1, 1]
assert not nx.is_pseudographical(seq)
seq = [1, 1, "rer", 1]
assert not nx.is_pseudographical(seq)
def test_numpy_degree_sequence():
np = pytest.importorskip("numpy")
ds = np.array([1, 2, 2, 2, 1], dtype=np.int64)
assert nx.is_graphical(ds, "eg")
assert nx.is_graphical(ds, "hh")
ds = np.array([1, 2, 2, 2, 1], dtype=np.float64)
assert nx.is_graphical(ds, "eg")
assert nx.is_graphical(ds, "hh")
ds = np.array([1.1, 2, 2, 2, 1], dtype=np.float64)
pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "eg")
pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "hh")
| TestAtlas |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/float.py | {
"start": 11975,
"end": 13155
} | class ____(BaseFloat[np.dtypes.Float64DType, np.float64]):
"""
A Zarr data type for arrays containing 64-bit floating point numbers.
Wraps the [`np.dtypes.Float64DType`][numpy.dtypes.Float64DType] data type. Scalars for this data type are instances
of [`np.float64`][numpy.float64].
Attributes
----------
dtype_cls : Type[np.dtypes.Float64DType]
The NumPy dtype class for this data type.
References
----------
This class implements the float64 data type defined in Zarr V2 and V3.
See the [Zarr V2](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding) and [Zarr V3](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v3/data-types/index.rst) specification documents for details.
"""
dtype_cls = np.dtypes.Float64DType
_zarr_v3_name = "float64"
_zarr_v2_names: ClassVar[tuple[Literal[">f8"], Literal["<f8"]]] = (">f8", "<f8")
@property
def item_size(self) -> int:
"""
The size of a single scalar in bytes.
Returns
-------
int
The size of a single scalar in bytes.
"""
return 8
| Float64 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 24587,
"end": 24710
} | class ____(DagsterError):
"""When job execution completes with steps in an unknown state."""
| DagsterUnknownStepStateError |
python | pandas-dev__pandas | pandas/tests/indexes/test_any_index.py | {
"start": 3352,
"end": 4842
} | class ____:
def test_getitem_0d_ndarray(self, index):
# GH#55601
if len(index) == 0:
pytest.skip(reason="Test assumes non-empty index")
key = np.array(0)
result = index[key]
assert result == index[0]
def test_get_loc_listlike_raises_invalid_index_error(self, index):
# and never TypeError
key = np.array([0, 1], dtype=np.intp)
with pytest.raises(InvalidIndexError, match=r"\[0 1\]"):
index.get_loc(key)
with pytest.raises(InvalidIndexError, match=r"\[False True\]"):
index.get_loc(key.astype(bool))
def test_getitem_ellipsis(self, index):
# GH#21282
result = index[...]
assert result.equals(index)
assert result is not index
def test_slice_keeps_name(self, index):
assert index.name == index[1:].name
@pytest.mark.parametrize("item", [101, "no_int", 2.5])
def test_getitem_error(self, index, item):
msg = "|".join(
[
r"index 101 is out of bounds for axis 0 with size [\d]+",
re.escape(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean arrays "
"are valid indices"
),
"index out of bounds", # string[pyarrow]
]
)
with pytest.raises(IndexError, match=msg):
index[item]
| TestIndexing |
python | ipython__ipython | IPython/extensions/deduperreload/deduperreload.py | {
"start": 1838,
"end": 2398
} | class ____(NamedTuple):
"""
Each node represents a function.
qualified_name: string which represents the namespace/name of the function
abstract_syntax_tree: subtree of the overall module which corresponds to this function
qualified_name is of the structure: (namespace1, namespace2, ..., name)
For example, foo() in the following would be represented as (A, B, foo):
class A:
class B:
def foo():
pass
"""
qualified_name: tuple[str, ...]
abstract_syntax_tree: ast.AST
| DependencyNode |
python | huggingface__transformers | src/transformers/models/bros/modeling_bros.py | {
"start": 13926,
"end": 16990
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BrosAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise Exception(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BrosAttention(config)
self.intermediate = BrosIntermediate(config)
self.output = BrosOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
bbox_pos_emb: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
bbox_pos_emb=bbox_pos_emb,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
if hasattr(self, "crossattention"):
raise Exception(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (None,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| BrosLayer |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_sum_values_to_be_equal_to_single_column.py | {
"start": 672,
"end": 2695
} | class ____(MulticolumnMapMetricProvider):
# </snippet>
# This is the id string that will be used to reference your metric.
# <snippet>
condition_metric_name = "multicolumn_values.sum_values_equal_to_single_column"
# </snippet>
# These point your metric at the provided keys to facilitate calculation
condition_domain_keys = (
"batch_id",
"table",
"column_list",
"row_condition",
"condition_parser",
"ignore_row_if",
)
condition_value_keys = ("additional_value",)
@multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column_list, additional_value, **kwargs):
columns_to_sum = column_list[0:-1]
sqlalchemy_columns_to_sum = columns_to_sum[0]
if len(columns_to_sum) > 1:
for column in columns_to_sum[1:]:
sqlalchemy_columns_to_sum += column
column_to_equal = column_list[-1]
sqlalchemy_columns_to_sum += additional_value
return sqlalchemy_columns_to_sum == column_to_equal
@multicolumn_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, dataframe, additional_value, **kwargs):
column_list = dataframe.columns
columns_to_sum = column_list[:-1]
column_to_equal = column_list[-1]
sum_columns = functools.reduce(operator.add, [F.col(column) for column in columns_to_sum])
sum_columns += additional_value
equal_column = F.col(column_to_equal)
return sum_columns == equal_column
@multicolumn_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, dataframe, additional_value, **kwargs):
columns_to_sum = dataframe.iloc[:, :-1]
column_to_equal = dataframe.iloc[:, -1]
sum_columns = columns_to_sum.sum(axis=1, skipna=False)
sum_columns += additional_value
return sum_columns == column_to_equal
# This class defines the Expectation itself
# <snippet>
| MulticolumnValuesSumValuesEqualToSingleColumn |
python | aio-libs__aiohttp | tests/test_web_middleware.py | {
"start": 6275,
"end": 19054
} | class ____:
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 200),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 200),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 200),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_add_trailing_when_necessary(
self, path: str, status: int, cli: CLI
) -> None:
extra_middlewares = [web.normalize_path_middleware(merge_slashes=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 200),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 200),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
("/resource12", 404),
("/resource12345", 404),
],
)
async def test_remove_trailing_when_necessary(
self, path: str, status: int, cli: CLI
) -> None:
extra_middlewares = [
web.normalize_path_middleware(
append_slash=False, remove_slash=True, merge_slashes=False
)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1", 200),
("/resource1/", 404),
("/resource2", 404),
("/resource2/", 200),
("/resource1?p1=1&p2=2", 200),
("/resource1/?p1=1&p2=2", 404),
("/resource2?p1=1&p2=2", 404),
("/resource2/?p1=1&p2=2", 200),
("/resource2/a/b%2Fc", 404),
("/resource2/a/b%2Fc/", 200),
],
)
async def test_no_trailing_slash_when_disabled(
self, path: str, status: int, cli: CLI
) -> None:
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, merge_slashes=False)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("//resource1//a//b", 200),
("//resource1//a//b/", 404),
("///resource1//a//b", 200),
("/////resource1/a///b", 200),
("/////resource1/a//b/", 404),
("/resource1/a/b?p=1", 200),
("//resource1//a//b?p=1", 200),
("//resource1//a//b/?p=1", 404),
("///resource1//a//b?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a//b/?p=1", 404),
],
)
async def test_merge_slash(self, path: str, status: int, cli: CLI) -> None:
extra_middlewares = [web.normalize_path_middleware(append_slash=False)]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 404),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 404),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 404),
("/resource2/a/b", 200),
("//resource2//a//b", 200),
("//resource2//a//b/", 200),
("///resource2//a//b", 200),
("///resource2//a//b/", 200),
("/////resource2/a///b", 200),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 404),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 404),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 404),
("/resource2/a/b?p=1", 200),
("//resource2//a//b?p=1", 200),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 200),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 200),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_append_and_merge_slash(
self, path: str, status: int, cli: CLI
) -> None:
extra_middlewares = [web.normalize_path_middleware()]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
@pytest.mark.parametrize(
"path, status",
[
("/resource1/a/b", 200),
("/resource1/a/b/", 200),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource1//a//b", 200),
("///resource1//a//b/", 200),
("/////resource1/a///b", 200),
("/////resource1/a///b/", 200),
("/////resource1/a///b///", 200),
("/resource2/a/b", 404),
("//resource2//a//b", 404),
("//resource2//a//b/", 200),
("///resource2//a//b", 404),
("///resource2//a//b/", 200),
("/////resource2/a///b", 404),
("/////resource2/a///b/", 200),
("/resource1/a/b?p=1", 200),
("/resource1/a/b/?p=1", 200),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource1//a//b?p=1", 200),
("///resource1//a//b/?p=1", 200),
("/////resource1/a///b?p=1", 200),
("/////resource1/a///b/?p=1", 200),
("/resource2/a/b?p=1", 404),
("//resource2//a//b?p=1", 404),
("//resource2//a//b/?p=1", 200),
("///resource2//a//b?p=1", 404),
("///resource2//a//b/?p=1", 200),
("/////resource2/a///b?p=1", 404),
("/////resource2/a///b/?p=1", 200),
],
)
async def test_remove_and_merge_slash(
self, path: str, status: int, cli: CLI
) -> None:
extra_middlewares = [
web.normalize_path_middleware(append_slash=False, remove_slash=True)
]
client = await cli(extra_middlewares)
resp = await client.get(path)
assert resp.status == status
assert resp.url.query == URL(path).query
async def test_cannot_remove_and_add_slash(self) -> None:
with pytest.raises(AssertionError):
web.normalize_path_middleware(append_slash=True, remove_slash=True)
@pytest.mark.parametrize(
["append_slash", "remove_slash"],
[
(True, False),
(False, True),
(False, False),
],
)
async def test_open_redirects(
self, append_slash: bool, remove_slash: bool, aiohttp_client: AiohttpClient
) -> None:
async def handle(request: web.Request) -> web.StreamResponse:
pytest.fail(
"Security advisory 'GHSA-v6wp-4m6f-gcjg' test handler "
"matched unexpectedly",
pytrace=False,
)
app = web.Application(
middlewares=[
web.normalize_path_middleware(
append_slash=append_slash, remove_slash=remove_slash
)
]
)
app.add_routes([web.get("/", handle), web.get("/google.com", handle)])
client = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
resp = await client.get("//google.com", allow_redirects=False)
assert resp.status == 308
assert resp.headers["Location"] == "/google.com"
assert resp.url.query == URL("//google.com").query
async def test_bug_3669(aiohttp_client: AiohttpClient) -> None:
async def paymethod(request: web.Request) -> NoReturn:
assert False
app = web.Application()
app.router.add_route("GET", "/paymethod", paymethod)
app.middlewares.append(
web.normalize_path_middleware(append_slash=False, remove_slash=True)
)
client = await aiohttp_client(app, server_kwargs={"skip_url_asserts": True})
resp = await client.get("/paymethods")
assert resp.status == 404
assert resp.url.path != "/paymethod"
async def test_old_style_middleware(
loop: asyncio.AbstractEventLoop, aiohttp_client: AiohttpClient
) -> None:
async def view_handler(request: web.Request) -> web.Response:
return web.Response(body=b"OK")
with pytest.deprecated_call(
match=r"^Middleware decorator is deprecated since 4\.0 and its "
r"behaviour is default, you can simply remove this decorator\.$",
):
@web.middleware
async def middleware(request: web.Request, handler: Handler) -> web.Response:
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
assert isinstance(resp, web.Response)
assert resp.text is not None
resp.text = resp.text + "[old style middleware]"
return resp
app = web.Application(middlewares=[middleware])
app.router.add_route("GET", "/", view_handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[old style middleware]" == txt
async def test_new_style_middleware_class(
loop: asyncio.AbstractEventLoop, aiohttp_client: AiohttpClient
) -> None:
async def handler(request: web.Request) -> web.Response:
return web.Response(body=b"OK")
class Middleware:
async def __call__(
self, request: web.Request, handler: Handler
) -> web.Response:
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
assert isinstance(resp, web.Response)
assert resp.text is not None
resp.text = resp.text + "[new style middleware]"
return resp
app = web.Application()
app.middlewares.append(Middleware())
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
async def test_new_style_middleware_method(
loop: asyncio.AbstractEventLoop, aiohttp_client: AiohttpClient
) -> None:
async def handler(request: web.Request) -> web.Response:
return web.Response(body=b"OK")
class Middleware:
async def call(self, request: web.Request, handler: Handler) -> web.Response:
resp = await handler(request)
assert 200 == resp.status
resp.set_status(201)
assert isinstance(resp, web.Response)
assert resp.text is not None
resp.text = resp.text + "[new style middleware]"
return resp
app = web.Application()
app.middlewares.append(Middleware().call)
app.router.add_route("GET", "/", handler)
client = await aiohttp_client(app)
resp = await client.get("/")
assert 201 == resp.status
txt = await resp.text()
assert "OK[new style middleware]" == txt
async def test_middleware_does_not_leak(aiohttp_client: AiohttpClient) -> None:
async def any_handler(request: web.Request) -> NoReturn:
assert False
class Middleware:
async def call(
self, request: web.Request, handler: Handler
) -> web.StreamResponse:
return await handler(request)
app = web.Application()
app.router.add_route("POST", "/any", any_handler)
app.middlewares.append(Middleware().call)
client = await aiohttp_client(app)
web_app._cached_build_middleware.cache_clear()
for _ in range(10):
resp = await client.get("/any")
assert resp.status == 405
assert web_app._cached_build_middleware.cache_info().currsize < 10
| TestNormalizePathMiddleware |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_engine.py | {
"start": 21692,
"end": 23583
} | class ____(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
@testing.variation("enable_comments", [True, False])
def test_comments_enabled_disabled(
self, testing_engine, metadata, enable_comments
):
Table(
"tbl_with_comments",
metadata,
Column(
"id",
Integer,
primary_key=True,
comment="pk comment",
),
Column("no_comment", Integer),
Column(
"has_comment",
String(20),
comment="has the comment",
),
comment="table comment",
)
eng = testing_engine(
options={"supports_comments": bool(enable_comments)}
)
metadata.create_all(eng)
insp = inspect(testing.db)
if enable_comments:
eq_(
insp.get_table_comment("tbl_with_comments"),
{"text": "table comment"},
)
cols = {
col["name"]: col["comment"]
for col in insp.get_columns("tbl_with_comments")
}
eq_(
cols,
{
"id": "pk comment",
"no_comment": None,
"has_comment": "has the comment",
},
)
else:
eq_(
insp.get_table_comment("tbl_with_comments"),
{"text": None},
)
cols = {
col["name"]: col["comment"]
for col in insp.get_columns("tbl_with_comments")
}
eq_(
cols,
{
"id": None,
"no_comment": None,
"has_comment": None,
},
)
| MiscTest |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 32373,
"end": 35992
} | class ____(forms.Form):
"""Project translation form."""
project = forms.ChoiceField()
def __init__(self, *args, **kwargs):
self.parent = kwargs.pop("parent", None)
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
self.fields["project"].choices = self.get_choices()
def get_choices(self):
return [
(
project.slug,
"{project} ({lang})".format(
project=project.slug,
lang=project.get_language_display(),
),
)
for project in self.get_translation_queryset().all()
]
def clean(self):
if not self.parent.supports_translations:
raise forms.ValidationError(
_(
"This project is configured with a versioning scheme that doesn't support translations."
),
)
return super().clean()
def clean_project(self):
"""Ensures that selected project is valid as a translation."""
translation_project_slug = self.cleaned_data["project"]
# Ensure parent project isn't already itself a translation
if self.parent.main_language_project is not None:
msg = 'Project "{project}" is already a translation'
raise forms.ValidationError(
(_(msg).format(project=self.parent.slug)),
)
project_translation_qs = self.get_translation_queryset().filter(
slug=translation_project_slug,
)
if not project_translation_qs.exists():
msg = 'Project "{project}" does not exist.'
raise forms.ValidationError(
(_(msg).format(project=translation_project_slug)),
)
self.translation = project_translation_qs.first()
if self.translation.language == self.parent.language:
msg = "Both projects can not have the same language ({lang})."
raise forms.ValidationError(
_(msg).format(lang=self.parent.get_language_display()),
)
# yapf: disable
exists_translation = (
self.parent.translations
.filter(language=self.translation.language)
.exists()
)
# yapf: enable
if exists_translation:
msg = "This project already has a translation for {lang}."
raise forms.ValidationError(
_(msg).format(lang=self.translation.get_language_display()),
)
is_parent = self.translation.translations.exists()
if is_parent:
msg = "A project with existing translations can not be added as a project translation."
raise forms.ValidationError(_(msg))
return translation_project_slug
def get_translation_queryset(self):
queryset = (
Project.objects.for_admin_user(self.user)
.filter(main_language_project=None)
.exclude(pk=self.parent.pk)
)
return queryset
def save(self, commit=True):
if commit:
# Don't use ``self.parent.translations.add()`` here as this
# triggers a problem with database routing and multiple databases.
# Directly set the ``main_language_project`` instead of doing a
# bulk update.
self.translation.main_language_project = self.parent
self.translation.save()
# Run other sync logic to make sure we are in a good state.
self.parent.save()
return self.parent
| TranslationBaseForm |
python | doocs__leetcode | solution/0400-0499/0471.Encode String with Shortest Length/Solution.py | {
"start": 0,
"end": 751
} | class ____:
def encode(self, s: str) -> str:
def g(i: int, j: int) -> str:
t = s[i : j + 1]
if len(t) < 5:
return t
k = (t + t).index(t, 1)
if k < len(t):
cnt = len(t) // k
return f"{cnt}[{f[i][i + k - 1]}]"
return t
n = len(s)
f = [[None] * n for _ in range(n)]
for i in range(n - 1, -1, -1):
for j in range(i, n):
f[i][j] = g(i, j)
if j - i + 1 > 4:
for k in range(i, j):
t = f[i][k] + f[k + 1][j]
if len(f[i][j]) > len(t):
f[i][j] = t
return f[0][-1]
| Solution |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 181,
"end": 251
} | class ____(ASTNode):
doc_string: Optional[str]
@dataclass
| Definition |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 34449,
"end": 34543
} | class ____(StringEnum):
frame = "frame"
source = "source"
roi = "roi"
| SchemaTypeEnum |
python | pandas-dev__pandas | pandas/tests/internals/test_internals.py | {
"start": 30079,
"end": 37736
} | class ____:
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
# and are disabled.
MANAGERS = [
create_single_mgr("f8", N),
create_single_mgr("i8", N),
# 2-dim
create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)),
create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)),
create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)),
create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)),
]
@pytest.mark.parametrize("mgr", MANAGERS)
def test_get_slice(self, mgr):
def assert_slice_ok(mgr, axis, slobj):
mat = _as_array(mgr)
# we maybe using an ndarray to test slicing and
# might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate(
[slobj, np.zeros(len(ax) - len(slobj), dtype=bool)]
)
if isinstance(slobj, slice):
sliced = mgr.get_slice(slobj, axis=axis)
elif (
mgr.ndim == 1
and axis == 0
and isinstance(slobj, np.ndarray)
and slobj.dtype == bool
):
sliced = mgr.get_rows_with_mask(slobj)
else:
# BlockManager doesn't support non-slice, SingleBlockManager
# doesn't support axis > 0
raise TypeError(slobj)
mat_slobj = (slice(None),) * axis + (slobj,)
tm.assert_numpy_array_equal(
mat[mat_slobj], _as_array(sliced), check_dtype=False
)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
assert mgr.ndim <= 2, mgr.ndim
for ax in range(mgr.ndim):
# slice
assert_slice_ok(mgr, ax, slice(None))
assert_slice_ok(mgr, ax, slice(3))
assert_slice_ok(mgr, ax, slice(100))
assert_slice_ok(mgr, ax, slice(1, 4))
assert_slice_ok(mgr, ax, slice(3, 0, -2))
if mgr.ndim < 2:
# 2D only support slice objects
# boolean mask
assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_))
assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0)
assert_slice_ok(
mgr, ax, np.array([True, True, False], dtype=np.bool_)
)
@pytest.mark.parametrize("mgr", MANAGERS)
def test_take(self, mgr):
def assert_take_ok(mgr, axis, indexer):
mat = _as_array(mgr)
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(
np.take(mat, indexer, axis), _as_array(taken), check_dtype=False
)
tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis])
for ax in range(mgr.ndim):
# take/fancy indexer
assert_take_ok(mgr, ax, indexer=np.array([], dtype=np.intp))
assert_take_ok(mgr, ax, indexer=np.array([0, 0, 0], dtype=np.intp))
assert_take_ok(
mgr, ax, indexer=np.array(list(range(mgr.shape[ax])), dtype=np.intp)
)
if mgr.shape[ax] >= 3:
assert_take_ok(mgr, ax, indexer=np.array([0, 1, 2], dtype=np.intp))
assert_take_ok(mgr, ax, indexer=np.array([-1, -2, -3], dtype=np.intp))
@pytest.mark.parametrize("mgr", MANAGERS)
@pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
def test_reindex_axis(self, fill_value, mgr):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = _as_array(mgr)
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value)
tm.assert_numpy_array_equal(
algos.take_nd(mat, indexer, axis, fill_value=fill_value),
_as_array(reindexed),
check_dtype=False,
)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for ax in range(mgr.ndim):
assert_reindex_axis_is_ok(mgr, ax, Index([]), fill_value)
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value)
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][[0, 0, 0]], fill_value)
assert_reindex_axis_is_ok(mgr, ax, Index(["foo", "bar", "baz"]), fill_value)
assert_reindex_axis_is_ok(
mgr, ax, Index(["foo", mgr.axes[ax][0], "baz"]), fill_value
)
if mgr.shape[ax] >= 3:
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][:-3], fill_value)
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax][-3::-1], fill_value)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value
)
@pytest.mark.parametrize("mgr", MANAGERS)
@pytest.mark.parametrize("fill_value", [None, np.nan, 100.0])
def test_reindex_indexer(self, fill_value, mgr):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):
mat = _as_array(mgr)
reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value)
reindexed = mgr.reindex_indexer(
new_labels, indexer, axis, fill_value=fill_value
)
tm.assert_numpy_array_equal(
reindexed_mat, _as_array(reindexed), check_dtype=False
)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for ax in range(mgr.ndim):
assert_reindex_indexer_is_ok(
mgr, ax, Index([]), np.array([], dtype=np.intp), fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value
)
assert_reindex_indexer_is_ok(
mgr,
ax,
Index(["foo"] * mgr.shape[ax]),
np.arange(mgr.shape[ax]),
fill_value,
)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, Index(["foo", "bar", "baz"]), np.array([0, 0, 0]), fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, Index(["foo", "bar", "baz"]), np.array([-1, 0, -1]), fill_value
)
assert_reindex_indexer_is_ok(
mgr,
ax,
Index(["foo", mgr.axes[ax][0], "baz"]),
np.array([-1, -1, -1]),
fill_value,
)
if mgr.shape[ax] >= 3:
assert_reindex_indexer_is_ok(
mgr,
ax,
Index(["foo", "bar", "baz"]),
np.array([0, 1, 2]),
fill_value,
)
| TestIndexing |
python | nryoung__algorithms | algorithms/data_structures/singly_linked_list.py | {
"start": 843,
"end": 2125
} | class ____:
def __init__(self):
self.head = None
self.size = 0
def add(self, value):
"""
Add element to list
Time Complexity: O(N)
"""
node = Node(value)
node.set_next(self.head)
self.head = node
self.size += 1
def _search_node(self, value, remove=False):
current = self.head
previous = None
while current:
if current.data == value:
break
else:
previous = current
current = current.next
if remove and current:
if previous is None: # Head node
self.head = current.next
else: # None head node
previous.set_next(current.next)
self.size -= 1
return current is not None
def remove(self, value):
"""
Remove element from list
Time Complexity: O(N)
"""
return self._search_node(value, True)
def search(self, value):
"""
Search for value in list
Time Complexity: O(N)
"""
return self._search_node(value)
def size(self):
"""
Return size of list
"""
return self.size
| SinglyLinkedList |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_annotations/allow_star_arg_any.py | {
"start": 439,
"end": 1021
} | class ____:
# OK
def foo_method(self, a: int, *params: str, **options: str) -> int:
pass
# ANN401
def foo_method(self, a: Any, *params: str, **options: str) -> int:
pass
# ANN401
def foo_method(self, a: int, *params: str, **options: str) -> Any:
pass
# OK
def foo_method(self, a: int, *params: Any, **options: Any) -> int:
pass
# OK
def foo_method(self, a: int, *params: Any, **options: str) -> int:
pass
# OK
def foo_method(self, a: int, *params: str, **options: Any) -> int:
pass
| Bar |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_hash.py | {
"start": 291,
"end": 462
} | class ____:
def __hash__(self):
print("ruff") # [invalid-hash-return]
# TODO: Once Ruff has better type checking
def return_int():
return "3"
| HashNoReturn |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-convert-string-ii.py | {
"start": 8241,
"end": 10751
} | class ____(object):
def minimumCost(self, source, target, original, changed, cost):
"""
:type source: str
:type target: str
:type original: List[str]
:type changed: List[str]
:type cost: List[int]
:rtype: int
"""
INF = float("inf")
def floydWarshall(dist):
for k in dist.iterkeys():
for i in dist.iterkeys():
if dist[i][k] == INF:
continue
for j in dist.iterkeys():
if dist[k][j] == INF:
continue
dist[i][j] = min(dist[i][j], dist[i][k]+dist[k][j])
lookup = {}
buckets = collections.defaultdict(list)
for x in itertools.chain(original, changed):
l = len(x)
if x in lookup:
continue
lookup[x] = len(lookup)
buckets[len(x)].append(lookup[x])
dists = {l:{u:{v:0 if u == v else INF for v in lookup} for u in lookup} for l, lookup in buckets.iteritems()}
for i in xrange(len(original)):
l = len(original[i])
dist = dists[l]
u, v = lookup[original[i]], lookup[changed[i]]
dist[u][v] = min(dist[u][v], cost[i])
for dist in dists.itervalues():
floydWarshall(dist)
candidates = {len(x) for x in original}
dp = [INF]*(max(len(x) for x in original)+1)
dp[0] = 0
for i in xrange(len(source)):
if dp[i%len(dp)] == INF:
continue
if source[i] == target[i]:
dp[(i+1)%len(dp)] = min(dp[(i+1)%len(dp)], dp[i%len(dp)])
for l in candidates:
if i+l > len(source):
continue
dist = dists[l]
u, v = source[i:i+l], target[i:i+l]
if u in lookup and v in lookup:
dp[(i+l)%len(dp)] = min(dp[(i+l)%len(dp)], dp[i%len(dp)]+dist[lookup[u]][lookup[v]])
dp[i%len(dp)] = INF
return dp[len(source)%len(dp)] if dp[len(source)%len(dp)] != INF else -1
# Time: O(o * l + k * eloge + n * l), e is the number of edges reachable from a given node u, o = len(original), l = max(len(x) for x in original), k = trie.k
# Space: O(t + k * v + l), v is the number of nodes reachable from a given node u
import itertools
import heapq
# trie, dijkstra's algorithm, dp, memoization
| Solution4 |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/higher_order_functions.py | {
"start": 449,
"end": 1792
} | class ____:
def method_to_sink(self, arg):
_test_sink(arg)
def self_to_sink(self):
_test_sink(self)
def higher_order_method(c: C, arg):
higher_order_function(c.method_to_sink, arg) # Expect an issue (False negative)
def test_higher_order_method():
higher_order_method(C(), _test_source())
def test_higher_order_method_self():
c: C = _test_source()
higher_order_function(c.self_to_sink)
def higher_order_function_and_sink(f, arg):
f(arg)
_test_sink(arg)
def test_higher_order_function_and_sink():
higher_order_function_and_sink(goes_to_sink, _test_source())
def test_higher_order_tito(x):
# no tito because higher_order_function does not return.
return higher_order_function(has_tito, x)
def apply(f, x):
return f(x)
def test_apply_tito(x):
return apply(has_tito, x)
def source_through_tito():
x = _test_source()
y = apply(has_tito, x)
return y
def test_apply_source():
return apply(_test_source, 0)
def sink_after_apply(f):
_test_sink(f())
def test_parameterized_target_in_issue_handle():
sink_after_apply(_test_source)
def apply_without_return(f, x) -> None:
f(x)
def test_apply_without_return():
apply_without_return(_test_sink, _test_source()) # Issue
apply_without_return(str, _test_source()) # No issue
| C |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 3813,
"end": 4676
} | class ____(SentencePieceExtractor):
def extract(self, vocab_scores=None) -> tuple[dict[str, int], list[tuple]]:
"""
By default will return vocab and merges with respect to their order, by sending `vocab_scores` we're going to
order the merges with respect to the piece scores instead.
"""
sp = self.sp
vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())}
# If "\t" is missing in the vocab, we have to do this to support merges
# "<0x09>" is the bytefallback for `\t`
if "\t" not in vocab:
vocab["\t"] = vocab.get("<0x09>")
merges = generate_merges(vocab, vocab_scores)
return vocab, merges
def check_number_comma(piece: str) -> bool:
return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()
| GemmaSentencePieceExtractor |
python | django__django | tests/check_framework/test_security.py | {
"start": 5243,
"end": 6953
} | class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False,
)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [csrf.W016])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE="1",
)
def test_with_csrf_cookie_secure_truthy(self):
"""CSRF_COOKIE_SECURE must be boolean."""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [csrf.W016])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_USE_SESSIONS=True,
CSRF_COOKIE_SECURE=False,
)
def test_use_sessions_with_csrf_cookie_secure_false(self):
"""
No warning if CSRF_COOKIE_SECURE isn't True while CSRF_USE_SESSIONS
is True.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
@override_settings(MIDDLEWARE=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True,
)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(csrf.check_csrf_cookie_secure(None), [])
| CheckCSRFCookieSecureTest |
python | aimacode__aima-python | deep_learning4e.py | {
"start": 1077,
"end": 1285
} | class ____:
def function(self, x):
return NotImplementedError
def derivative(self, x):
return NotImplementedError
def __call__(self, x):
return self.function(x)
| Activation |
python | conda__conda | conda/models/records.py | {
"start": 20958,
"end": 21446
} | class ____(PackageRecord):
"""Representation of a package that has been returned as part of a solver solution.
This sits between :class:`PackageRecord` and :class:`PrefixRecord`, simply adding
``requested_spec`` so it can be used in lockfiles without requiring the artifact on
disk.
"""
#: str: The :class:`MatchSpec` that the user requested or ``None`` if the package it was installed as a dependency.
requested_spec = StringField(required=False)
| SolvedRecord |
python | apache__airflow | task-sdk/tests/task_sdk/api/test_client.py | {
"start": 42827,
"end": 53846
} | class ____:
def test_trigger(self):
# Simulate a successful response from the server when triggering a dag run
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_trigger/test_run_id":
actual_body = json.loads(request.read())
assert actual_body["logical_date"] == "2025-01-01T00:00:00Z"
assert actual_body["conf"] == {}
# Since the value for `reset_dag_run` is default, it should not be present in the request body
assert "reset_dag_run" not in actual_body
return httpx.Response(status_code=204)
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.trigger(
dag_id="test_trigger", run_id="test_run_id", logical_date=timezone.datetime(2025, 1, 1)
)
assert result == OKResponse(ok=True)
def test_trigger_conflict(self):
"""Test that if the dag run already exists, the client returns an error when default reset_dag_run=False"""
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_trigger_conflict/test_run_id":
return httpx.Response(
status_code=409,
json={
"detail": {
"reason": "already_exists",
"message": "A Dag Run already exists for Dag test_trigger_conflict with run id test_run_id",
}
},
)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.trigger(dag_id="test_trigger_conflict", run_id="test_run_id")
assert result == ErrorResponse(error=ErrorType.DAGRUN_ALREADY_EXISTS)
def test_trigger_conflict_reset_dag_run(self):
"""Test that if dag run already exists and reset_dag_run=True, the client clears the dag run"""
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_trigger_conflict_reset/test_run_id":
return httpx.Response(
status_code=409,
json={
"detail": {
"reason": "already_exists",
"message": "A Dag Run already exists for Dag test_trigger_conflict with run id test_run_id",
}
},
)
if request.url.path == "/dag-runs/test_trigger_conflict_reset/test_run_id/clear":
return httpx.Response(status_code=204)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.trigger(
dag_id="test_trigger_conflict_reset",
run_id="test_run_id",
reset_dag_run=True,
)
assert result == OKResponse(ok=True)
def test_clear(self):
"""Test that the client can clear a dag run"""
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_clear/test_run_id/clear":
return httpx.Response(status_code=204)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.clear(dag_id="test_clear", run_id="test_run_id")
assert result == OKResponse(ok=True)
def test_get_state(self):
"""Test that the client can get the state of a dag run"""
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_state/test_run_id/state":
return httpx.Response(
status_code=200,
json={"state": "running"},
)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_state(dag_id="test_state", run_id="test_run_id")
assert result == DagRunStateResponse(state=DagRunState.RUNNING)
def test_get_count_basic(self):
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/count":
assert request.url.params["dag_id"] == "test_dag"
return httpx.Response(status_code=200, json=1)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_count(dag_id="test_dag")
assert result.count == 1
def test_get_count_with_states(self):
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/count":
assert request.url.params["dag_id"] == "test_dag"
assert request.url.params.get_list("states") == ["success", "failed"]
return httpx.Response(status_code=200, json=2)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_count(dag_id="test_dag", states=["success", "failed"])
assert result.count == 2
def test_get_count_with_logical_dates(self):
logical_dates = [timezone.datetime(2025, 1, 1), timezone.datetime(2025, 1, 2)]
logical_dates_str = [d.isoformat() for d in logical_dates]
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/count":
assert request.url.params["dag_id"] == "test_dag"
assert request.url.params.get_list("logical_dates") == logical_dates_str
return httpx.Response(status_code=200, json=2)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_count(
dag_id="test_dag", logical_dates=[timezone.datetime(2025, 1, 1), timezone.datetime(2025, 1, 2)]
)
assert result.count == 2
def test_get_count_with_run_ids(self):
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/count":
assert request.url.params["dag_id"] == "test_dag"
assert request.url.params.get_list("run_ids") == ["run1", "run2"]
return httpx.Response(status_code=200, json=2)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_count(dag_id="test_dag", run_ids=["run1", "run2"])
assert result.count == 2
def test_get_previous_basic(self):
"""Test basic get_previous functionality with dag_id and logical_date."""
logical_date = datetime(2024, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_dag/previous":
assert request.url.params["logical_date"] == logical_date.isoformat()
# Return complete DagRun data
return httpx.Response(
status_code=200,
json={
"dag_id": "test_dag",
"run_id": "prev_run",
"logical_date": "2024-01-14T12:00:00+00:00",
"start_date": "2024-01-14T12:05:00+00:00",
"run_after": "2024-01-14T12:00:00+00:00",
"run_type": "scheduled",
"state": "success",
"consumed_asset_events": [],
},
)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_previous(dag_id="test_dag", logical_date=logical_date)
assert isinstance(result, PreviousDagRunResult)
assert result.dag_run.dag_id == "test_dag"
assert result.dag_run.run_id == "prev_run"
assert result.dag_run.state == "success"
def test_get_previous_with_state_filter(self):
"""Test get_previous functionality with state filtering."""
logical_date = datetime(2024, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_dag/previous":
assert request.url.params["logical_date"] == logical_date.isoformat()
assert request.url.params["state"] == "success"
# Return complete DagRun data
return httpx.Response(
status_code=200,
json={
"dag_id": "test_dag",
"run_id": "prev_success_run",
"logical_date": "2024-01-14T12:00:00+00:00",
"start_date": "2024-01-14T12:05:00+00:00",
"run_after": "2024-01-14T12:00:00+00:00",
"run_type": "scheduled",
"state": "success",
"consumed_asset_events": [],
},
)
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_previous(dag_id="test_dag", logical_date=logical_date, state="success")
assert isinstance(result, PreviousDagRunResult)
assert result.dag_run.dag_id == "test_dag"
assert result.dag_run.run_id == "prev_success_run"
assert result.dag_run.state == "success"
def test_get_previous_not_found(self):
"""Test get_previous when no previous Dag run exists returns None."""
logical_date = datetime(2024, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
def handle_request(request: httpx.Request) -> httpx.Response:
if request.url.path == "/dag-runs/test_dag/previous":
assert request.url.params["logical_date"] == logical_date.isoformat()
# Return None (null) when no previous Dag run found
return httpx.Response(status_code=200, content="null")
return httpx.Response(status_code=422)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.dag_runs.get_previous(dag_id="test_dag", logical_date=logical_date)
assert isinstance(result, PreviousDagRunResult)
assert result.dag_run is None
| TestDagRunOperations |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict12.py | {
"start": 971,
"end": 1034
} | class ____(TypedDict):
foo: int
baz: NotRequired[int]
| TD3 |
python | sympy__sympy | sympy/polys/agca/extensions.py | {
"start": 5442,
"end": 9633
} | class ____(Domain):
r"""
Finite extension generated by an integral element.
The generator is defined by a monic univariate
polynomial derived from the argument ``mod``.
A shorter alias is ``FiniteExtension``.
Examples
========
Quadratic integer ring $\mathbb{Z}[\sqrt2]$:
>>> from sympy import Symbol, Poly
>>> from sympy.polys.agca.extensions import FiniteExtension
>>> x = Symbol('x')
>>> R = FiniteExtension(Poly(x**2 - 2)); R
ZZ[x]/(x**2 - 2)
>>> R.rank
2
>>> R(1 + x)*(3 - 2*x)
x - 1
Finite field $GF(5^3)$ defined by the primitive
polynomial $x^3 + x^2 + 2$ (over $\mathbb{Z}_5$).
>>> F = FiniteExtension(Poly(x**3 + x**2 + 2, modulus=5)); F
GF(5)[x]/(x**3 + x**2 + 2)
>>> F.basis
(1, x, x**2)
>>> F(x + 3)/(x**2 + 2)
-2*x**2 + x + 2
Function field of an elliptic curve:
>>> t = Symbol('t')
>>> FiniteExtension(Poly(t**2 - x**3 - x + 1, t, field=True))
ZZ(x)[t]/(t**2 - x**3 - x + 1)
"""
is_FiniteExtension = True
dtype = ExtensionElement
def __init__(self, mod):
if not (isinstance(mod, Poly) and mod.is_univariate):
raise TypeError("modulus must be a univariate Poly")
# Using auto=True (default) potentially changes the ground domain to a
# field whereas auto=False raises if division is not exact. We'll let
# the caller decide whether or not they want to put the ground domain
# over a field. In most uses mod is already monic.
mod = mod.monic(auto=False)
self.rank = mod.degree()
self.modulus = mod
self.mod = mod.rep # DMP representation
self.domain = dom = mod.domain
self.ring = dom.old_poly_ring(*mod.gens)
self.zero = self.convert(self.ring.zero)
self.one = self.convert(self.ring.one)
gen = self.ring.gens[0]
self.symbol = self.ring.symbols[0]
self.generator = self.convert(gen)
self.basis = tuple(self.convert(gen**i) for i in range(self.rank))
# XXX: It might be necessary to check mod.is_irreducible here
self.is_Field = self.domain.is_Field
def new(self, arg):
rep = self.ring.convert(arg)
return ExtElem(rep % self.mod, self)
def __eq__(self, other):
if not isinstance(other, FiniteExtension):
return False
return self.modulus == other.modulus
def __hash__(self):
return hash((self.__class__.__name__, self.modulus))
def __str__(self):
return "%s/(%s)" % (self.ring, self.modulus.as_expr())
__repr__ = __str__
@property
def has_CharacteristicZero(self):
return self.domain.has_CharacteristicZero
def characteristic(self):
return self.domain.characteristic()
def convert(self, f, base=None):
rep = self.ring.convert(f, base)
return ExtElem(rep % self.mod, self)
def convert_from(self, f, base):
rep = self.ring.convert(f, base)
return ExtElem(rep % self.mod, self)
def to_sympy(self, f):
return self.ring.to_sympy(f.rep)
def from_sympy(self, f):
return self.convert(f)
def set_domain(self, K):
mod = self.modulus.set_domain(K)
return self.__class__(mod)
def drop(self, *symbols):
if self.symbol in symbols:
raise GeneratorsError('Can not drop generator from FiniteExtension')
K = self.domain.drop(*symbols)
return self.set_domain(K)
def quo(self, f, g):
return self.exquo(f, g)
def exquo(self, f, g):
ring = self.ring
try:
rep = ring.exquo(f.rep, g.rep)
except ExactQuotientFailed:
if not ring.domain.is_Field:
raise
ginv = ring.invert(g.rep, self.mod)
rep = ring.mul(f.rep, ginv)
return ExtElem(rep % self.mod, self)
def is_negative(self, a):
return False
def is_unit(self, a):
if self.is_Field:
return bool(a)
elif a.is_ground:
return self.domain.is_unit(a.to_ground())
FiniteExtension = MonogenicFiniteExtension
| MonogenicFiniteExtension |
python | ansible__ansible | lib/ansible/plugins/loader.py | {
"start": 4676,
"end": 12272
} | class ____(object):
def __init__(self, plugin_type: str, legacy_package_name: str) -> None:
self.original_name: str | None = None
self.redirect_list: list[str] = []
self.raw_error_list: list[Exception] = []
"""All exception instances encountered during the plugin load."""
self.error_list: list[str] = []
"""Stringified exceptions, excluding import errors."""
self.import_error_list: list[Exception] = []
"""All ImportError exception instances encountered during the plugin load."""
self.load_attempts: list[str] = []
self.pending_redirect: str | None = None
self.exit_reason: str | None = None
self.plugin_resolved_path: str | None = None
self.plugin_resolved_name: str | None = None
"""For collection plugins, the resolved Python module FQ __name__; for non-collections, the short name."""
self.plugin_resolved_collection: str | None = None # empty string for resolved plugins from user-supplied paths
"""For collection plugins, the resolved collection {ns}.{col}; empty string for non-collection plugins."""
self.deprecated: bool = False
self.removal_date: str | None = None
self.removal_version: str | None = None
self.deprecation_warnings: list[str] = []
self.resolved: bool = False
self._resolved_fqcn: str | None = None
self.action_plugin: str | None = None
self._plugin_type: str = plugin_type
"""The type of the plugin."""
self._legacy_package_name = legacy_package_name
"""The legacy sys.modules package name from the plugin loader instance; stored to prevent potentially incorrect manual computation."""
self._python_module_name: str | None = None
"""
The fully qualified Python module name for the plugin (accessible via `sys.modules`).
For non-collection non-core plugins, this may include a non-existent synthetic package element with a hash of the file path to avoid collisions.
"""
@property
def resolved_fqcn(self) -> str | None:
if not self.resolved:
return None
if not self._resolved_fqcn:
final_plugin = self.redirect_list[-1]
if AnsibleCollectionRef.is_valid_fqcr(final_plugin) and final_plugin.startswith('ansible.legacy.'):
final_plugin = final_plugin.split('ansible.legacy.')[-1]
if self.plugin_resolved_collection and not AnsibleCollectionRef.is_valid_fqcr(final_plugin):
final_plugin = self.plugin_resolved_collection + '.' + final_plugin
self._resolved_fqcn = final_plugin
return self._resolved_fqcn
def record_deprecation(self, name: str, deprecation: dict[str, t.Any] | None, collection_name: str) -> t.Self:
if not deprecation:
return self
# The `or ''` instead of using `.get(..., '')` makes sure that even if the user explicitly
# sets `warning_text` to `~` (None) or `false`, we still get an empty string.
warning_text = deprecation.get('warning_text', None) or ''
removal_date = deprecation.get('removal_date', None)
removal_version = deprecation.get('removal_version', None)
# If both removal_date and removal_version are specified, use removal_date
if removal_date is not None:
removal_version = None
warning_text = '{0} has been deprecated.{1}{2}'.format(name, ' ' if warning_text else '', warning_text)
display.deprecated( # pylint: disable=ansible-deprecated-date-not-permitted,ansible-deprecated-unnecessary-collection-name
msg=warning_text,
date=removal_date,
version=removal_version,
deprecator=deprecator_from_collection_name(collection_name),
)
self.deprecated = True
if removal_date:
self.removal_date = removal_date
if removal_version:
self.removal_version = removal_version
self.deprecation_warnings.append(warning_text)
return self
def resolve(self, resolved_name: str, resolved_path: str, resolved_collection: str, exit_reason: str, action_plugin: str) -> t.Self:
"""Record a resolved collection plugin."""
self.pending_redirect = None
self.plugin_resolved_name = resolved_name
self.plugin_resolved_path = resolved_path
self.plugin_resolved_collection = resolved_collection
self.exit_reason = exit_reason
self._python_module_name = resolved_name
self.resolved = True
self.action_plugin = action_plugin
return self
def resolve_legacy(self, name: str, pull_cache: dict[str, PluginPathContext]) -> t.Self:
"""Record a resolved legacy plugin."""
plugin_path_context = pull_cache[name]
self.plugin_resolved_name = name
self.plugin_resolved_path = plugin_path_context.path
self.plugin_resolved_collection = 'ansible.builtin' if plugin_path_context.internal else ''
self._resolved_fqcn = 'ansible.builtin.' + name if plugin_path_context.internal else name
self._python_module_name = self._make_legacy_python_module_name()
self.resolved = True
return self
def resolve_legacy_jinja_plugin(self, name: str, known_plugin: AnsibleJinja2Plugin) -> t.Self:
"""Record a resolved legacy Jinja plugin."""
internal = known_plugin.ansible_name.startswith('ansible.builtin.')
self.plugin_resolved_name = name
self.plugin_resolved_path = known_plugin._original_path
self.plugin_resolved_collection = 'ansible.builtin' if internal else ''
self._resolved_fqcn = known_plugin.ansible_name
self._python_module_name = self._make_legacy_python_module_name()
self.resolved = True
return self
def redirect(self, redirect_name: str) -> t.Self:
self.pending_redirect = redirect_name
self.exit_reason = 'pending redirect resolution from {0} to {1}'.format(self.original_name, redirect_name)
self.resolved = False
return self
def nope(self, exit_reason: str) -> t.Self:
self.pending_redirect = None
self.exit_reason = exit_reason
self.resolved = False
return self
def _make_legacy_python_module_name(self) -> str:
"""
Generate a fully-qualified Python module name for a legacy/builtin plugin.
The same package namespace is shared for builtin and legacy plugins.
Explicit requests for builtins via `ansible.builtin` are handled elsewhere with an aliased collection package resolved by the collection loader.
Only unqualified and `ansible.legacy`-qualified requests land here; whichever plugin is visible at the time will end up in sys.modules.
Filter and test plugin host modules receive special name suffixes to avoid collisions unrelated to the actual plugin name.
"""
name = os.path.splitext(self.plugin_resolved_path)[0]
basename = os.path.basename(name)
if self._plugin_type in ('filter', 'test'):
# Unlike other plugin types, filter and test plugin names are independent of the file where they are defined.
# As a result, the Python module name must be derived from the full path of the plugin.
# This prevents accidental shadowing of unrelated plugins of the same type.
basename += f'_{abs(hash(self.plugin_resolved_path))}'
return f'{self._legacy_package_name}.{basename}'
| PluginLoadContext |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/trad_relationship_uselist.py | {
"start": 1285,
"end": 4443
} | class ____(Base):
__tablename__ = "address"
id = mapped_column(Integer, primary_key=True)
user_id = mapped_column(ForeignKey("user.id"))
email = mapped_column(String, nullable=False)
user_style_one = relationship(User, uselist=False)
user_style_one_typed: Mapped[User] = relationship(User, uselist=False)
user_style_two = relationship("User", uselist=False)
user_style_two_typed: Mapped["User"] = relationship("User", uselist=False)
# these is obviously not correct relationally but want to see the typing
# work out with a real class passed as the argument
user_style_three: Mapped[List[User]] = relationship(User, uselist=True)
user_style_four: Mapped[List[User]] = relationship("User", uselist=True)
user_style_five: Mapped[List[User]] = relationship(User, uselist=True)
user_style_six: Mapped[Set[User]] = relationship(
User, uselist=True, collection_class=set
)
user_style_seven = relationship(User, uselist=True, collection_class=set)
user_style_eight = relationship(User, uselist=True, collection_class=list)
user_style_nine = relationship(User, uselist=True)
user_style_ten = relationship(
User, collection_class=attribute_keyed_dict("name")
)
user_style_ten_typed: Mapped[Dict[str, User]] = relationship(
User, collection_class=attribute_keyed_dict("name")
)
# pylance rejects this however. cannot get both to work at the same
# time.
# if collection_class is cast() to mutablemapping, then pylance seems
# OK. cannot make sense of the errors or what would the official way to
# do these things would be. pylance keeps changing and newly breaking
# things, never know what's a bug, what's a "known limitation", and what's
# "you need to learn more". I can't imagine most programmers being able
# to navigate this stuff
# user_style_ten_typed_mapping: Mapped[MutableMapping[str, User]] = relationship(
# User, collection_class=attribute_mapped_collection("name")
# )
if typing.TYPE_CHECKING:
assert_type(User.addresses_style_one, InstrumentedAttribute[list[Address]])
assert_type(User.addresses_style_two, InstrumentedAttribute[set[Address]])
assert_type(User.addresses_style_three, InstrumentedAttribute[Any])
assert_type(User.addresses_style_three_cast, InstrumentedAttribute[Any])
assert_type(User.addresses_style_four, InstrumentedAttribute[Any])
assert_type(Address.user_style_one, InstrumentedAttribute[Any])
assert_type(Address.user_style_one_typed, InstrumentedAttribute[User])
assert_type(Address.user_style_two, InstrumentedAttribute[Any])
assert_type(Address.user_style_two_typed, InstrumentedAttribute[User])
# reveal_type(Address.user_style_six)
# reveal_type(Address.user_style_seven)
assert_type(Address.user_style_eight, InstrumentedAttribute[Any])
assert_type(Address.user_style_nine, InstrumentedAttribute[Any])
assert_type(Address.user_style_ten, InstrumentedAttribute[Any])
assert_type(
Address.user_style_ten_typed, InstrumentedAttribute[dict[str, User]]
)
| Address |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/decorators/kubernetes_cmd.py | {
"start": 1268,
"end": 4677
} | class ____(DecoratedOperator, KubernetesPodOperator):
custom_operator_name = "@task.kubernetes_cmd"
template_fields: Sequence[str] = KubernetesPodOperator.template_fields
overwrite_rtif_after_execution: bool = True
def __init__(self, *, python_callable: Callable, args_only: bool = False, **kwargs) -> None:
self.args_only = args_only
cmds = kwargs.pop("cmds", None)
arguments = kwargs.pop("arguments", None)
if cmds is not None or arguments is not None:
warnings.warn(
f"The `cmds` and `arguments` are unused in {self.custom_operator_name} decorator. "
"You should return a list of commands or image entrypoint arguments with "
"args_only=True from the python_callable.",
UserWarning,
stacklevel=3,
)
# If the name was not provided, we generate operator name from the python_callable
# we also instruct operator to add a random suffix to avoid collisions by default
op_name = kwargs.pop("name", f"k8s-airflow-pod-{python_callable.__name__}")
random_name_suffix = kwargs.pop("random_name_suffix", True)
super().__init__(
python_callable=python_callable,
name=op_name,
random_name_suffix=random_name_suffix,
cmds=None,
arguments=None,
**kwargs,
)
def execute(self, context: Context):
generated = self._generate_cmds(context)
if self.args_only:
self.cmds = []
self.arguments = generated
else:
self.cmds = generated
self.arguments = []
context["ti"].render_templates() # type: ignore[attr-defined]
return super().execute(context)
def _generate_cmds(self, context: Context) -> list[str]:
context_merge(context, self.op_kwargs)
kwargs = determine_kwargs(self.python_callable, self.op_args, context)
generated_cmds = self.python_callable(*self.op_args, **kwargs)
func_name = self.python_callable.__name__
if not isinstance(generated_cmds, list):
raise TypeError(
f"Expected python_callable to return a list of strings, but got {type(generated_cmds)}"
)
if not all(isinstance(cmd, str) for cmd in generated_cmds):
raise TypeError(f"Expected {func_name} to return a list of strings, but got {generated_cmds}")
if not generated_cmds:
raise ValueError(f"The {func_name} returned an empty list of commands")
return generated_cmds
def kubernetes_cmd_task(
python_callable: Callable | None = None,
**kwargs,
) -> TaskDecorator:
"""
Kubernetes cmd operator decorator.
This wraps a function which should return command to be executed
in K8s using KubernetesPodOperator. The function should return a list of strings.
If args_only is set to True, the function should return a list of arguments for
container default command. Also accepts any argument that KubernetesPodOperator
will via ``kwargs``. Can be reused in a single DAG.
:param python_callable: Function to decorate
"""
return task_decorator_factory(
python_callable=python_callable,
decorated_operator_class=_KubernetesCmdDecoratedOperator,
**kwargs,
)
| _KubernetesCmdDecoratedOperator |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/inspection_inspect.py | {
"start": 606,
"end": 658
} | class ____(DeclarativeBaseNoMeta):
pass
| BaseNoMeta |
python | huggingface__transformers | tests/models/roformer/test_modeling_roformer.py | {
"start": 1509,
"end": 14065
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return RoFormerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RoFormerModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = RoFormerModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_generate_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config = copy.deepcopy(config)
config.is_decoder = True
model = RoFormerForCausalLM(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=15, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=15, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RoFormerForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = RoFormerForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = RoFormerForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RoFormerForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = RoFormerForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = RoFormerForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| RoFormerModelTester |
python | python-poetry__poetry | src/poetry/console/commands/env_command.py | {
"start": 181,
"end": 544
} | class ____(Command):
def __init__(self) -> None:
# Set in poetry.console.application.Application.configure_env
self._env: Env | None = None
super().__init__()
@property
def env(self) -> Env:
assert self._env is not None
return self._env
def set_env(self, env: Env) -> None:
self._env = env
| EnvCommand |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-drive/source_google_drive/spec.py | {
"start": 2042,
"end": 2617
} | class ____(BaseModel):
class Config(OneOfOptionConfig):
title = "Service Account Key Authentication"
discriminator = "auth_type"
auth_type: Literal["Service"] = Field("Service", const=True)
service_account_info: str = Field(
title="Service Account Information",
description='The JSON key of the service account to use for authorization. Read more <a href="https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys">here</a>.',
airbyte_secret=True,
)
| ServiceAccountCredentials |
python | pandas-dev__pandas | pandas/tseries/holiday.py | {
"start": 3445,
"end": 13437
} | class ____:
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
start_date: Timestamp | None
end_date: Timestamp | None
days_of_week: tuple[int, ...] | None
def __init__(
self,
name: str,
year=None,
month=None,
day=None,
offset: BaseOffset | list[BaseOffset] | None = None,
observance: Callable | None = None,
start_date=None,
end_date=None,
days_of_week: tuple | None = None,
exclude_dates: DatetimeIndex | None = None,
) -> None:
"""
Parameters
----------
name : str
Name of the holiday , defaults to class name
year : int, default None
Year of the holiday
month : int, default None
Month of the holiday
day : int, default None
Day of the holiday
offset : list of pandas.tseries.offsets or
class from pandas.tseries.offsets, default None
Computes offset from date
observance : function, default None
Computes when holiday is given a pandas Timestamp
start_date : datetime-like, default None
First date the holiday is observed
end_date : datetime-like, default None
Last date the holiday is observed
days_of_week : tuple of int or dateutil.relativedelta weekday strs, default None
Provide a tuple of days e.g (0,1,2,3,) for Monday through Thursday
Monday=0,..,Sunday=6
Only instances of the holiday included in days_of_week will be computed
exclude_dates : DatetimeIndex or default None
Specific dates to exclude e.g. skipping a specific year's holiday
Examples
--------
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = pd.tseries.holiday.Holiday(
... "Memorial Day", month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))
... )
>>> USMemorialDay
Holiday: Memorial Day (month=5, day=31, offset=<DateOffset: weekday=MO(-1)>)
>>> USLaborDay = pd.tseries.holiday.Holiday(
... "Labor Day", month=9, day=1, offset=pd.DateOffset(weekday=MO(1))
... )
>>> USLaborDay
Holiday: Labor Day (month=9, day=1, offset=<DateOffset: weekday=MO(+1)>)
>>> July3rd = pd.tseries.holiday.Holiday("July 3rd", month=7, day=3)
>>> July3rd
Holiday: July 3rd (month=7, day=3, )
>>> NewYears = pd.tseries.holiday.Holiday(
... "New Years Day",
... month=1,
... day=1,
... observance=pd.tseries.holiday.nearest_workday,
... )
>>> NewYears # doctest: +SKIP
Holiday: New Years Day (
month=1, day=1, observance=<function nearest_workday at 0x66545e9bc440>
)
>>> July3rd = pd.tseries.holiday.Holiday(
... "July 3rd", month=7, day=3, days_of_week=(0, 1, 2, 3)
... )
>>> July3rd
Holiday: July 3rd (month=7, day=3, )
"""
if offset is not None:
if observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
if not (
isinstance(offset, BaseOffset)
or (
isinstance(offset, list)
and all(isinstance(off, BaseOffset) for off in offset)
)
):
raise ValueError(
"Only BaseOffsets and flat lists of them are supported for offset."
)
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = (
Timestamp(start_date) if start_date is not None else start_date
)
self.end_date = Timestamp(end_date) if end_date is not None else end_date
self.observance = observance
if not (days_of_week is None or isinstance(days_of_week, tuple)):
raise ValueError("days_of_week must be None or tuple.")
self.days_of_week = days_of_week
if not (exclude_dates is None or isinstance(exclude_dates, DatetimeIndex)):
raise ValueError("exclude_dates must be None or of type DatetimeIndex.")
self.exclude_dates = exclude_dates
def __repr__(self) -> str:
info = ""
if self.year is not None:
info += f"year={self.year}, "
info += f"month={self.month}, day={self.day}, "
if self.offset is not None:
info += f"offset={self.offset}"
if self.observance is not None:
info += f"observance={self.observance}"
repr = f"Holiday: {self.name} ({info})"
return repr
@overload
def dates(self, start_date, end_date, return_name: Literal[True]) -> Series: ...
@overload
def dates(
self, start_date, end_date, return_name: Literal[False]
) -> DatetimeIndex: ...
@overload
def dates(self, start_date, end_date) -> DatetimeIndex: ...
def dates(
self, start_date, end_date, return_name: bool = False
) -> Series | DatetimeIndex:
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
Returns
-------
Series or DatetimeIndex
Series if return_name is True
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
dti = DatetimeIndex([dt])
if return_name:
return Series(self.name, index=dti)
else:
return dti
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[
np.isin(
# error: "DatetimeIndex" has no attribute "dayofweek"
holiday_dates.dayofweek, # type: ignore[attr-defined]
self.days_of_week,
).ravel()
]
if self.start_date is not None:
filter_start_date = max(
self.start_date.tz_localize(filter_start_date.tz), filter_start_date
)
if self.end_date is not None:
filter_end_date = min(
self.end_date.tz_localize(filter_end_date.tz), filter_end_date
)
holiday_dates = holiday_dates[
(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)
]
if self.exclude_dates is not None:
holiday_dates = holiday_dates.difference(self.exclude_dates)
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(
self, start_date: Timestamp, end_date: Timestamp
) -> DatetimeIndex:
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day)
)
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day)
)
# Don't process unnecessary holidays
dates = date_range(
start=reference_start_date,
end=reference_end_date,
freq=year_offset,
tz=start_date.tz,
)
return dates
def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex:
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if dates.empty:
return dates.copy()
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
dates += offset
return dates
holiday_calendars: dict[str, type[AbstractHolidayCalendar]] = {}
def register(cls) -> None:
try:
name = cls.name
except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name: str) -> AbstractHolidayCalendar:
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
| Holiday |
python | openai__openai-python | src/openai/types/responses/response_function_shell_call_output_content_param.py | {
"start": 456,
"end": 723
} | class ____(TypedDict, total=False):
exit_code: Required[int]
"""The exit code returned by the shell process."""
type: Required[Literal["exit"]]
"""The outcome type. Always `exit`."""
Outcome: TypeAlias = Union[OutcomeTimeout, OutcomeExit]
| OutcomeExit |
python | huggingface__transformers | src/transformers/models/blip_2/configuration_blip_2.py | {
"start": 904,
"end": 4660
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a
BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration defaults will yield a similar configuration to that of the BLIP-2
[Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults
to 1e-5): The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import Blip2VisionConfig, Blip2VisionModel
>>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration
>>> configuration = Blip2VisionConfig()
>>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration
>>> model = Blip2VisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "blip_2_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1408,
intermediate_size=6144,
num_hidden_layers=39,
num_attention_heads=16,
image_size=224,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=1e-10,
qkv_bias=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
| Blip2VisionConfig |
python | getsentry__sentry | tests/sentry/hybridcloud/rpc/test_sig.py | {
"start": 132,
"end": 1350
} | class ____(TestCase):
def test_signature(self) -> None:
class AnObject(pydantic.BaseModel):
a: int
b: str
def a_function(arg1: AnObject, arg2: AnObject) -> AnObject:
raise NotImplementedError
sig = SerializableFunctionSignature(a_function)
arg_values = dict(arg1=AnObject(a=1, b="foo"), arg2=AnObject(a=2, b="bar"))
serialized_arguments = sig.serialize_arguments(arg_values)
assert serialized_arguments == {"arg1": {"a": 1, "b": "foo"}, "arg2": {"a": 2, "b": "bar"}}
deserialized_arguments = sig.deserialize_arguments(serialized_arguments)
assert isinstance(deserialized_arguments, pydantic.BaseModel)
assert set(deserialized_arguments.__dict__.keys()) == {"arg1", "arg2"}
assert hasattr(deserialized_arguments, "arg1")
assert deserialized_arguments.arg1 == AnObject(a=1, b="foo")
assert hasattr(deserialized_arguments, "arg2")
assert deserialized_arguments.arg2 == AnObject(a=2, b="bar")
deserialized_return_value = sig.deserialize_return_value(dict(a=3, b="qux"))
assert deserialized_return_value == AnObject(a=3, b="qux")
| SerializableFunctionSignatureTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.