language | repo | path | class_span | source | target
|---|---|---|---|---|---|
python | conda__conda | conda/common/configuration.py | {
"start": 47441,
"end": 49915
} | class ____(type):
"""metaclass for Configuration"""
def __init__(cls, name, bases, attr):
super().__init__(name, bases, attr)
# call _set_name for each parameter found during class creation
cls.parameter_names = tuple(
p._set_name(name)
for name, p in cls.__dict__.items()
if isinstance(p, ParameterLoader)
)
# Build parameter_names_and_aliases by extracting parameter loaders directly
cls._set_parameter_names_and_aliases()
@property
def _parameter_loaders(cls) -> dict[str, ParameterLoader]:
return {
name: param
for name, param in cls.__dict__.items()
if isinstance(param, ParameterLoader)
}
def __call__(cls, *args, **kwargs):
self = super().__call__(*args, **kwargs)
self._parameter_loaders = cls._parameter_loaders
return self
DEFAULT_CONDARC_FILENAME: Final = ".condarc"
ALTERNATIVE_CONDARC_FILENAME: Final = "condarc"
CONDARC_FILENAMES = (DEFAULT_CONDARC_FILENAME, ALTERNATIVE_CONDARC_FILENAME)
YAML_EXTENSIONS = (".yml", ".yaml")
_RE_CUSTOM_EXPANDVARS = compile(
rf"""
# delimiter and a Python identifier
\$(?P<named>{Template.idpattern}) |
# delimiter and a braced identifier
\${{(?P<braced>{Template.idpattern})}} |
# delimiter padded identifier
%(?P<padded>{Template.idpattern})%
""",
flags=IGNORECASE | VERBOSE,
)
def custom_expandvars(
template: str, mapping: Mapping[str, Any] = {}, /, **kwargs
) -> str:
"""Expand variables in a string.
Inspired by `string.Template` and modified to mirror `os.path.expandvars` functionality
allowing custom variables without mutating `os.environ`.
Expands POSIX and Windows CMD environment variables as follows:
- $VARIABLE → value of VARIABLE
- ${VARIABLE} → value of VARIABLE
- %VARIABLE% → value of VARIABLE
Invalid substitutions are left as-is:
- $MISSING → $MISSING
- ${MISSING} → ${MISSING}
- %MISSING% → %MISSING%
- $$ → $$
- %% → %%
- $ → $
- % → %
"""
mapping = {**mapping, **kwargs}
def convert(match: Match):
return str(
mapping.get(
match.group("named") or match.group("braced") or match.group("padded"),
match.group(), # fallback to the original string
)
)
return _RE_CUSTOM_EXPANDVARS.sub(convert, template)
| ConfigurationType |
python | django-extensions__django-extensions | tests/test_validators.py | {
"start": 3620,
"end": 6597
} | class ____(SimpleTestCase):
def test_custom_message_and_code(self):
self.validator = HexValidator(message="message", code="code")
self.assertEqual(self.validator.message, "message")
self.assertEqual(self.validator.code, "code")
def test_equality_of_objs_with_obj_of_different_type(self):
self.assertNotEqual(TypeError(), HexValidator())
def test_equality_of_objs_with_different_code(self):
self.assertNotEqual(HexValidator(code="1"), HexValidator(code="a"))
def test_equality_of_objs_with_different_message(self):
self.assertNotEqual(
HexValidator(code="code", message="a"),
HexValidator(code="code", message="acb"),
)
def test_equality_of_objs_with_same_code_and_message(self):
self.assertEqual(
HexValidator(code="c", message="m"), HexValidator(code="c", message="m")
)
def test_fixed_length(self):
value = "abcd"
self.validator = HexValidator(length=5)
with self.assertRaises(ValidationError) as err:
self.validator(value)
self.assertEqual(
str(err.exception), "['Invalid length. Must be 5 characters.']"
)
self.assertEqual(err.exception.code, "hex_only_length")
def test_min_length(self):
value = "a"
self.validator = HexValidator(min_length=5)
with self.assertRaises(ValidationError) as err:
self.validator(value)
self.assertEqual(
str(err.exception), "['Ensure that there are more than 5 characters.']"
)
self.assertEqual(err.exception.code, "hex_only_min_length")
def test_with_max_length(self):
value = "abcd"
self.validator = HexValidator(max_length=2)
with self.assertRaises(ValidationError) as err:
self.validator(value)
self.assertEqual(
str(err.exception), "['Ensure that there are no more than 2 characters.']"
)
self.assertEqual(err.exception.code, "hex_only_max_length")
def test_invalid_type(self):
value = 1
with patch("django_extensions.validators.force_str", return_value=1):
self.validator = HexValidator()
with self.assertRaises(ValidationError) as err:
self.validator(value)
self.assertEqual(str(err.exception), "['Only a hex string is allowed.']")
self.assertEqual(err.exception.code, "hex_only")
def test_invalid_hex(self):
value = "1"
self.validator = HexValidator()
with self.assertRaises(ValidationError) as err:
self.validator(value)
self.assertEqual(str(err.exception), "['Only a hex string is allowed.']")
self.assertEqual(err.exception.code, "hex_only")
def test_valid_hex(self):
value = "b901ef"
self.validator = HexValidator()
result = self.validator(value)
self.assertIsNone(result)
| TestHexValidator |
python | pytorch__pytorch | benchmarks/inductor_backends/cutlass.py | {
"start": 2239,
"end": 2374
} | class ____(ExperimentConfig):
def name(self) -> str:
return "aten"
@dataclass(frozen=True, kw_only=True)
| AtenExperimentConfig |
python | google__pytype | pytype/abstract/_typing.py | {
"start": 1331,
"end": 3362
} | class ____(_instance_base.SimpleValue, mixin.HasSlots):
"""Base class of annotations that can be parameterized."""
def __init__(self, name: str, ctx: "context.Context"):
super().__init__(name, ctx)
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__getitem__", self.getitem_slot)
def getitem_slot(
self, node: _base.BaseValue, slice_var: "cfg.Variable"
) -> "tuple[cfg.CFGNode, cfg.Variable]":
"""Custom __getitem__ implementation."""
slice_content = abstract_utils.maybe_extract_tuple(slice_var)
inner, ellipses = self._build_inner(slice_content)
value = self._build_value(node, tuple(inner), ellipses)
return node, value.to_variable(node)
def _build_inner(
self, slice_content: "Iterable[cfg.Variable]"
) -> "tuple[list[_base.BaseValue], set[int]]":
"""Build the list of parameters.
Args:
slice_content: The iterable of variables to extract parameters from.
Returns:
A tuple of a list of parameters and a set of indices at which an ellipsis
was replaced with Any.
"""
inner = []
ellipses = set()
for var in slice_content:
if len(var.bindings) > 1:
self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, var.data)
inner.append(self.ctx.convert.unsolvable)
else:
val = var.bindings[0].data
if val is self.ctx.convert.ellipsis:
# Ellipses are allowed only in special cases, so turn them into Any
# but record the indices so we can check if they're legal.
ellipses.add(len(inner))
inner.append(self.ctx.convert.unsolvable)
else:
inner.append(val)
return inner, ellipses
def _build_value(
self,
node: _base.BaseValue,
inner: tuple[_base.BaseValue, ...],
ellipses: set[int],
):
raise NotImplementedError(self.__class__.__name__)
def __repr__(self) -> str:
return f"AnnotationClass({self.name})"
def _get_class(self):
return self.ctx.convert.type_type
| AnnotationClass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 334029,
"end": 335136
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseAllowPrivateRepositoryForkingSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "policy_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the allow private
repository forking setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue), graphql_name="settingValue")
"""The value for the allow private repository forking setting on the
enterprise.
"""
policy_value = sgqlc.types.Field(EnterpriseAllowPrivateRepositoryForkingPolicyValue, graphql_name="policyValue")
"""The value for the allow private repository forking policy on the
enterprise.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseAllowPrivateRepositoryForkingSettingInput |
python | getsentry__sentry | src/sentry/integrations/cursor/client.py | {
"start": 611,
"end": 3581
} | class ____(CodingAgentClient):
integration_name = "cursor"
base_url = "https://api.cursor.com"
api_key: str
def __init__(self, api_key: str, webhook_secret: str):
super().__init__()
self.api_key = api_key
self.webhook_secret = webhook_secret
def _get_auth_headers(self) -> dict[str, str]:
return {"Authorization": f"Bearer {self.api_key}"}
def get_api_key_metadata(self) -> CursorApiKeyMetadata:
"""Fetch metadata about the API key from Cursor's /v0/me endpoint."""
logger.info(
"coding_agent.cursor.get_api_key_metadata",
extra={"agent_type": self.__class__.__name__},
)
api_response = self.get(
"/v0/me",
headers={
"content-type": "application/json;charset=utf-8",
**self._get_auth_headers(),
},
timeout=30,
)
return CursorApiKeyMetadata.validate(api_response.json)
def launch(self, webhook_url: str, request: CodingAgentLaunchRequest) -> CodingAgentState:
"""Launch coding agent with webhook callback."""
payload = CursorAgentLaunchRequestBody(
prompt=CursorAgentLaunchRequestPrompt(
text=request.prompt,
),
source=CursorAgentSource(
repository=f"https://github.com/{request.repository.owner}/{request.repository.name}",
ref=request.repository.branch_name,
),
webhook=CursorAgentLaunchRequestWebhook(url=webhook_url, secret=self.webhook_secret),
target=CursorAgentLaunchRequestTarget(
autoCreatePr=request.auto_create_pr,
branchName=request.branch_name,
openAsCursorGithubApp=True,
),
)
logger.info(
"coding_agent.cursor.launch",
extra={
"webhook_url": webhook_url,
"agent_type": self.__class__.__name__,
},
)
# Use shared ApiClient to get consistent error handling with body surfaced
api_response = self.post(
"/v0/agents",
headers={
"content-type": "application/json;charset=utf-8",
**self._get_auth_headers(),
},
data=payload.dict(exclude_none=True),
json=True,
timeout=60,
)
launch_response = CursorAgentLaunchResponse.validate(api_response.json)
return CodingAgentState(
id=launch_response.id,
status=CodingAgentStatus.RUNNING, # Cursor agent doesn't send when it actually starts so we just assume it's running
provider=CodingAgentProviderType.CURSOR_BACKGROUND_AGENT,
name=launch_response.name or f"Cursor Agent {launch_response.id}",
started_at=launch_response.createdAt,
agent_url=launch_response.target.url,
)
| CursorAgentClient |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 25078,
"end": 25686
} | class ____:
def __getitem__(self, inner_type):
check.not_none_param(inner_type, "inner_type")
return _List(resolve_dagster_type(inner_type))
def __call__(self, inner_type):
check.not_none_param(inner_type, "inner_type")
return _List(inner_type)
List: DagsterListApi = DagsterListApi()
def _List(inner_type):
check.inst_param(inner_type, "inner_type", DagsterType)
if inner_type is Nothing:
raise DagsterInvalidDefinitionError(
"Type Nothing can not be wrapped in List or Optional"
)
return ListType(inner_type)
| DagsterListApi |
python | encode__django-rest-framework | tests/test_pagination.py | {
"start": 38898,
"end": 43448
} | class ____(CursorPaginationTestsMixin, TestCase):
"""
Unit tests for `pagination.CursorPagination` for value querysets.
"""
def setUp(self):
class ExamplePagination(pagination.CursorPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 20
ordering = 'created'
self.pagination = ExamplePagination()
data = [
1, 1, 1, 1, 1,
1, 2, 3, 4, 4,
4, 4, 5, 6, 7,
7, 7, 7, 7, 7,
7, 7, 7, 8, 9,
9, 9, 9, 9, 9
]
for idx in data:
CursorPaginationModel.objects.create(created=idx)
self.queryset = CursorPaginationModel.objects.values()
def get_pages(self, url):
"""
Given a URL return a tuple of:
(previous page, current page, next page, previous url, next url)
"""
request = Request(factory.get(url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
current = [item['created'] for item in queryset]
next_url = self.pagination.get_next_link()
previous_url = self.pagination.get_previous_link()
if next_url is not None:
request = Request(factory.get(next_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
next = [item['created'] for item in queryset]
else:
next = None
if previous_url is not None:
request = Request(factory.get(previous_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
previous = [item['created'] for item in queryset]
else:
previous = None
return (previous, current, next, previous_url, next_url)
def test_get_displayed_page_numbers():
"""
Test our contextual page display function.
This determines which pages to display in a pagination control,
given the current page and the last page.
"""
displayed_page_numbers = pagination._get_displayed_page_numbers
# At five pages or less, all pages are displayed, always.
assert displayed_page_numbers(1, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(2, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(3, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(4, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(5, 5) == [1, 2, 3, 4, 5]
# Between six and either pages we may have a single page break.
assert displayed_page_numbers(1, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(2, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(3, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(4, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(5, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(6, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(1, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(2, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(3, 7) == [1, 2, 3, 4, None, 7]
assert displayed_page_numbers(4, 7) == [1, 2, 3, 4, 5, 6, 7]
assert displayed_page_numbers(5, 7) == [1, None, 4, 5, 6, 7]
assert displayed_page_numbers(6, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(7, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(1, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(2, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(3, 8) == [1, 2, 3, 4, None, 8]
assert displayed_page_numbers(4, 8) == [1, 2, 3, 4, 5, None, 8]
assert displayed_page_numbers(5, 8) == [1, None, 4, 5, 6, 7, 8]
assert displayed_page_numbers(6, 8) == [1, None, 5, 6, 7, 8]
assert displayed_page_numbers(7, 8) == [1, None, 6, 7, 8]
assert displayed_page_numbers(8, 8) == [1, None, 6, 7, 8]
# At nine or more pages we may have two page breaks, one on each side.
assert displayed_page_numbers(1, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(2, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(3, 9) == [1, 2, 3, 4, None, 9]
assert displayed_page_numbers(4, 9) == [1, 2, 3, 4, 5, None, 9]
assert displayed_page_numbers(5, 9) == [1, None, 4, 5, 6, None, 9]
assert displayed_page_numbers(6, 9) == [1, None, 5, 6, 7, 8, 9]
assert displayed_page_numbers(7, 9) == [1, None, 6, 7, 8, 9]
assert displayed_page_numbers(8, 9) == [1, None, 7, 8, 9]
assert displayed_page_numbers(9, 9) == [1, None, 7, 8, 9]
| TestCursorPaginationWithValueQueryset |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_expression.py | {
"start": 16941,
"end": 18610
} | class ____(QueryExpressionVisitor[QueryExpression]):
"""
Visitor that recursively applies a unit transformation on all the numeric scalars in a `QueryExpression`.
"""
def __init__(self, unit: Unit):
self._unit = unit
def _visit_formula(self, formula: Formula) -> QueryExpression:
has_coefficient_operators = formula.function_name in COEFFICIENT_OPERATORS
# In case the formula has a coefficient operator with all scalars, we want to scale the entire formula by
# wrapping it in another formula. For all the other cases, we just want to apply the scaling to each component
# of the formula, to make the formula less deep.
# Example:
# scaling (a * b) by 1000 = (a * b) * 1000
# scaling (a + b) by 1000 = (a * 1000 + b * 1000) in this case the multiplication is performed in-memory
if has_coefficient_operators:
has_all_scalars = True
for parameter in formula.parameters:
if not self._is_numeric_scalar(parameter):
has_all_scalars = False
return self._unit.apply_on_query_expression(formula) if has_all_scalars else formula
return super()._visit_formula(formula)
def _visit_int(self, int_number: float) -> QueryExpression:
return self._unit.apply_on_query_expression(int_number)
def _visit_float(self, float_number: float) -> QueryExpression:
return self._unit.apply_on_query_expression(float_number)
def _is_numeric_scalar(self, value: QueryExpression) -> bool:
return isinstance(value, int) or isinstance(value, float)
| NumericScalarsNormalizationVisitor |
python | huggingface__transformers | tests/models/zamba/test_modeling_zamba.py | {
"start": 1509,
"end": 10111
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
mamba_dt_rank=32,
num_hidden_layers=5,
attn_layer_offset=1,
attn_layer_period=8,
num_attention_heads=4,
num_key_value_heads=4,
n_mamba_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_mamba_act="silu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.mamba_dt_rank = mamba_dt_rank
self.num_hidden_layers = num_hidden_layers
self.attn_layer_offset = attn_layer_offset
self.attn_layer_period = attn_layer_period
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.n_mamba_heads = n_mamba_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_mamba_act = hidden_mamba_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return ZambaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
mamba_dt_rank=self.mamba_dt_rank,
num_hidden_layers=self.num_hidden_layers,
attn_layer_offset=self.attn_layer_offset,
attn_layer_period=self.attn_layer_period,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
n_mamba_heads=self.n_mamba_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_mamba_act=self.hidden_mamba_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=True,
initializer_range=self.initializer_range,
use_mamba_kernels=False,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = ZambaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = ZambaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids, labels=token_labels)
result = model(input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
config.add_cross_attention = True
model = ZambaForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
# Attention: Zamba needs the cache to be initialized to return a cache!
past_key_values = ZambaHybridDynamicCache(config, input_ids.shape[0], model.dtype, device=model.device)
outputs = model(
input_ids,
attention_mask=input_mask,
past_key_values=past_key_values,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
cache_position=torch.arange(
input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
),
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = ZambaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| ZambaModelTester |
python | Farama-Foundation__Gymnasium | gymnasium/envs/box2d/bipedal_walker.py | {
"start": 28274,
"end": 32027
} | class ____:
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1, 2, 3
SPEED = 0.29 # Will fall forward on higher speed
state = STAY_ON_ONE_LEG
moving_leg = 0
supporting_leg = 1 - moving_leg
SUPPORT_KNEE_ANGLE = +0.1
supporting_knee_angle = SUPPORT_KNEE_ANGLE
a = np.array([0.0, 0.0, 0.0, 0.0])
def step_heuristic(self, s):
moving_s_base = 4 + 5 * self.moving_leg
supporting_s_base = 4 + 5 * self.supporting_leg
hip_targ = [None, None] # -0.8 .. +1.1
knee_targ = [None, None] # -0.6 .. +0.9
hip_todo = [0.0, 0.0]
knee_todo = [0.0, 0.0]
if self.state == self.STAY_ON_ONE_LEG:
hip_targ[self.moving_leg] = 1.1
knee_targ[self.moving_leg] = -0.6
self.supporting_knee_angle += 0.03
if s[2] > self.SPEED:
self.supporting_knee_angle += 0.03
self.supporting_knee_angle = min(
self.supporting_knee_angle, self.SUPPORT_KNEE_ANGLE
)
knee_targ[self.supporting_leg] = self.supporting_knee_angle
if s[supporting_s_base + 0] < 0.10: # supporting leg is behind
self.state = self.PUT_OTHER_DOWN
if self.state == self.PUT_OTHER_DOWN:
hip_targ[self.moving_leg] = +0.1
knee_targ[self.moving_leg] = self.SUPPORT_KNEE_ANGLE
knee_targ[self.supporting_leg] = self.supporting_knee_angle
if s[moving_s_base + 4]:
self.state = self.PUSH_OFF
self.supporting_knee_angle = min(
s[moving_s_base + 2], self.SUPPORT_KNEE_ANGLE
)
if self.state == self.PUSH_OFF:
knee_targ[self.moving_leg] = self.supporting_knee_angle
knee_targ[self.supporting_leg] = +1.0
if s[supporting_s_base + 2] > 0.88 or s[2] > 1.2 * self.SPEED:
self.state = self.STAY_ON_ONE_LEG
self.moving_leg = 1 - self.moving_leg
self.supporting_leg = 1 - self.moving_leg
if hip_targ[0]:
hip_todo[0] = 0.9 * (hip_targ[0] - s[4]) - 0.25 * s[5]
if hip_targ[1]:
hip_todo[1] = 0.9 * (hip_targ[1] - s[9]) - 0.25 * s[10]
if knee_targ[0]:
knee_todo[0] = 4.0 * (knee_targ[0] - s[6]) - 0.25 * s[7]
if knee_targ[1]:
knee_todo[1] = 4.0 * (knee_targ[1] - s[11]) - 0.25 * s[12]
hip_todo[0] -= 0.9 * (0 - s[0]) - 1.5 * s[1] # PID to keep head strait
hip_todo[1] -= 0.9 * (0 - s[0]) - 1.5 * s[1]
knee_todo[0] -= 15.0 * s[3] # vertical speed, to damp oscillations
knee_todo[1] -= 15.0 * s[3]
self.a[0] = hip_todo[0]
self.a[1] = knee_todo[0]
self.a[2] = hip_todo[1]
self.a[3] = knee_todo[1]
self.a = np.clip(0.5 * self.a, -1.0, 1.0)
return self.a
if __name__ == "__main__":
env = BipedalWalker(render_mode="human")
env.reset()
steps = 0
total_reward = 0
a = np.array([0.0, 0.0, 0.0, 0.0])
# Heurisic: suboptimal, have no notion of balance.
heuristics = BipedalWalkerHeuristics()
while True:
s, r, terminated, truncated, info = env.step(a)
total_reward += r
if steps % 20 == 0 or terminated or truncated:
print("\naction " + str([f"{x:+0.2f}" for x in a]))
print(f"step {steps} total_reward {total_reward:+0.2f}")
print("hull " + str([f"{x:+0.2f}" for x in s[0:4]]))
print("leg0 " + str([f"{x:+0.2f}" for x in s[4:9]]))
print("leg1 " + str([f"{x:+0.2f}" for x in s[9:14]]))
steps += 1
a = heuristics.step_heuristic(s)
if terminated or truncated:
break
| BipedalWalkerHeuristics |
python | justquick__django-activity-stream | actstream/feeds.py | {
"start": 9846,
"end": 10392
} | class ____(UserActivityMixin, ActivityStreamsBaseFeed):
def title(self, user):
return 'Activity feed for your followed actors'
def link(self, user):
if not user:
return reverse('actstream')
if hasattr(user, 'get_absolute_url'):
return user.get_absolute_url()
ctype = ContentType.objects.get_for_model(user)
return reverse('actstream_actor', None, (ctype.pk, user.pk))
def description(self, user):
return 'Public activities of actors you follow'
| UserActivityFeed |
python | getsentry__sentry | src/sentry/bgtasks/api.py | {
"start": 597,
"end": 2988
} | class ____:
def __init__(
self, callback: Callable[[], None], roles: list[str] | None = None, interval: int = 60
) -> None:
self.callback = callback
self.roles = roles or []
self.interval = interval
self.running = False
@property
def name(self) -> str:
return f"{self.callback.__module__}:{self.callback.__name__}"
def run(self) -> None:
if self.running:
return
self.running = True
next_run = time.time() + self.interval * random.random()
while self.running:
now = time.time()
if now >= next_run:
try:
self.callback()
except Exception:
logging.exception("bgtask.failed", extra=dict(task_name=self.name))
next_run = now + self.interval
time.sleep(1.0)
def reconfigure(self, cfg: BgTaskConfig) -> None:
if "roles" in cfg:
self.roles = cfg["roles"]
if "interval" in cfg:
self.interval = cfg["interval"]
def spawn_daemon(self) -> None:
if self.running:
return
logger.info("bgtask.spawn", extra=dict(task_name=self.name))
t = threading.Thread(target=self.run, daemon=True)
t.start()
def stop(self) -> None:
logger.info("bgtask.stop", extra=dict(task_name=self.name))
self.running = False
def get_task(task_name: str) -> BgTask:
module, task_cls = task_name.split(":", 1)
mod = __import__(module, None, None, [task_cls])
obj = getattr(mod, task_cls)
if not isinstance(obj, BgTask):
raise TypeError(f"expected BgTask @ {task_name} got {obj!r}")
return obj
def spawn_bgtasks(role: str) -> None:
for import_name, cfg in settings.BGTASKS.items():
task = get_task(import_name)
# This is already running
if task.name in tasks:
continue
task.reconfigure(cfg)
if role not in task.roles:
continue
task.spawn_daemon()
tasks[task.name] = task
def shutdown_bgtasks() -> None:
for task_name, task in list(tasks.items()):
task.stop()
tasks.pop(task_name, None)
@contextmanager
def managed_bgtasks(role: str) -> Generator[None]:
spawn_bgtasks(role)
try:
yield
finally:
shutdown_bgtasks()
| BgTask |
python | bokeh__bokeh | src/bokeh/models/textures.py | {
"start": 2197,
"end": 3260
} | class ____(Texture):
'''
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
url = Required(Image, help="""
A URL to a drawable resource like image, video, etc.
If provided with a file path, the file will be encoded using ``data:``
protocol (utf-8 encoding for ``*.svg`` and base64 for ``*.png`` and
other binary formats).
NumPy 2D arrays are also supported and use ``data:`` encoding as well.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ImageURLTexture |
python | google__python-fire | fire/test_components_py3.py | {
"start": 1405,
"end": 1483
} | class ____:
async def double(self, count=0):
return 2 * count
| WithAsyncio |
python | langchain-ai__langchain | libs/partners/qdrant/langchain_qdrant/fastembed_sparse.py | {
"start": 215,
"end": 3190
} | class ____(SparseEmbeddings):
"""An interface for sparse embedding models to use with Qdrant."""
def __init__(
self,
model_name: str = "Qdrant/bm25",
batch_size: int = 256,
cache_dir: str | None = None,
threads: int | None = None,
providers: Sequence[Any] | None = None,
parallel: int | None = None,
**kwargs: Any,
) -> None:
"""Sparse encoder implementation using FastEmbed.
Uses [FastEmbed](https://qdrant.github.io/fastembed/) for sparse text
embeddings.
For a list of available models, see [the Qdrant docs](https://qdrant.github.io/fastembed/examples/Supported_Models/).
Args:
model_name (str): The name of the model to use.
batch_size (int): Batch size for encoding.
cache_dir (str, optional): The path to the model cache directory.\
Can also be set using the\
`FASTEMBED_CACHE_PATH` env variable.
threads (int, optional): The number of threads onnxruntime session can use.
providers (Sequence[Any], optional): List of ONNX execution providers.\
parallel (int, optional): If `>1`, data-parallel encoding will be used, r\
Recommended for encoding of large datasets.\
If `0`, use all available cores.\
If `None`, don't use data-parallel processing,\
use default onnxruntime threading instead.\
kwargs: Additional options to pass to `fastembed.SparseTextEmbedding`
Raises:
ValueError: If the `model_name` is not supported in `SparseTextEmbedding`.
"""
try:
from fastembed import ( # type: ignore[import-not-found] # noqa: PLC0415
SparseTextEmbedding,
)
except ImportError as err:
msg = (
"The 'fastembed' package is not installed. "
"Please install it with "
"`pip install fastembed` or `pip install fastembed-gpu`."
)
raise ValueError(msg) from err
self._batch_size = batch_size
self._parallel = parallel
self._model = SparseTextEmbedding(
model_name=model_name,
cache_dir=cache_dir,
threads=threads,
providers=providers,
**kwargs,
)
def embed_documents(self, texts: list[str]) -> list[SparseVector]:
results = self._model.embed(
texts, batch_size=self._batch_size, parallel=self._parallel
)
return [
SparseVector(indices=result.indices.tolist(), values=result.values.tolist())
for result in results
]
def embed_query(self, text: str) -> SparseVector:
result = next(self._model.query_embed(text))
return SparseVector(
indices=result.indices.tolist(), values=result.values.tolist()
)
| FastEmbedSparse |
python | sphinx-doc__sphinx | sphinx/directives/admonitions.py | {
"start": 1620,
"end": 1690
} | class ____(SphinxAdmonition):
node_class = nodes.important
| Important |
python | pytorch__pytorch | test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_ops.py | {
"start": 5806,
"end": 7340
} | class ____(TestCase):
def test_scalar_type_fallback(self):
x_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64)
x = torch.triu_indices(3, 3, device="openreg")
self.assertEqual(x_cpu, x)
def test_tensor_type_fallback(self):
x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("openreg")
y = torch.Tensor([1, 0, 2]).to("openreg")
self.assertTrue(x.device.type, "openreg")
self.assertFalse(x.is_cpu)
z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
# call sub op, which will fallback to cpu
z = torch.sub(x, y)
self.assertEqual(z_cpu, z)
# call index op, which will fallback to cpu
z_cpu = torch.Tensor([3, 1])
y = torch.Tensor([1, 0]).long().to("openreg")
z = x[y, y]
self.assertEqual(z_cpu, z)
def test_tensorlist_type_fallback(self):
# create tensors located in custom device
v_openreg = torch.Tensor([1, 2, 3]).to("openreg")
# create result tensor located in cpu
z_cpu = torch.Tensor([2, 4, 6])
# create tensorlist for foreach_add op
x = (v_openreg, v_openreg)
y = (v_openreg, v_openreg)
# Check that our device is correct.
self.assertTrue(v_openreg.device.type == "openreg")
self.assertFalse(v_openreg.is_cpu)
# call _foreach_add op, which will fallback to cpu
z = torch._foreach_add(x, y)
self.assertEqual(z_cpu, z[0])
self.assertEqual(z_cpu, z[1])
| TestFallback |
python | astropy__astropy | astropy/table/tests/test_operations.py | {
"start": 56528,
"end": 63670
} | class ____:
def _setup(self, t_cls=Table):
self.t1 = t_cls.read(
[
" a b",
" 0. foo",
" 1. bar",
],
format="ascii",
)
self.t2 = t_cls.read(
[
" a b c",
" 2. pez 4",
" 3. sez 5",
],
format="ascii",
)
self.t2["d"] = Time([1, 2], format="cxcsec")
self.t3 = t_cls(
{
"a": [[5.0, 6.0], [4.0, 3.0]],
"b": [["foo", "bar"], ["pez", "sez"]],
},
names=("a", "b"),
)
self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
self.t5 = t_cls(
{
"a": [[4.0, 2.0], [1.0, 6.0]],
"b": [["foo", "pez"], ["bar", "sez"]],
},
names=("a", "b"),
)
self.t6 = t_cls.read(
[
" a b c",
" 7. pez 2",
" 4. sez 6",
" 6. foo 3",
],
format="ascii",
)
def test_validate_join_type(self):
self._setup()
with pytest.raises(TypeError, match="Did you accidentally call dstack"):
table.dstack(self.t1, self.t2)
@staticmethod
def compare_dstack(tables, out):
for ii, tbl in enumerate(tables):
for name in out.columns:
if name in tbl.colnames:
# Columns always compare equal
assert np.all(tbl[name] == out[name][:, ii])
# If input has a mask then output must have same mask
if hasattr(tbl[name], "mask"):
assert np.all(tbl[name].mask == out[name].mask[:, ii])
# If input has no mask then output might have a mask (if other table
# is missing that column). If so then all mask values should be False.
elif hasattr(out[name], "mask"):
assert not np.any(out[name].mask[:, ii])
else:
# Column missing for this table, out must have a mask with all True.
assert np.all(out[name].mask[:, ii])
def test_dstack_table_column(self, operation_table_type):
"""Stack a table with 3 cols and one column (gets auto-converted to Table)."""
self._setup(operation_table_type)
t2 = self.t1.copy()
out = table.dstack([self.t1, t2["a"]])
self.compare_dstack([self.t1, t2[("a",)]], out)
def test_dstack_basic_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail("Quantity columns do not support masking.")
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
t4["a"].mask[0] = True
# Test for non-masked table
t12 = table.dstack([t1, t2], join_type="outer")
assert type(t12) is operation_table_type
assert type(t12["a"]) is type(t1["a"])
assert type(t12["b"]) is type(t1["b"])
self.compare_dstack([t1, t2], t12)
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="outer")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"])
assert type(t124["b"]) is type(t4["b"])
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_basic_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
t4 = self.t4
# Test for masked table
t124 = table.dstack([t1, t2, t4], join_type="inner")
assert type(t124) is operation_table_type
assert type(t124["a"]) is type(t4["a"])
assert type(t124["b"]) is type(t4["b"])
self.compare_dstack([t1, t2, t4], t124)
def test_dstack_multi_dimension_column(self, operation_table_type):
self._setup(operation_table_type)
t3 = self.t3
t5 = self.t5
t2 = self.t2
t35 = table.dstack([t3, t5])
assert type(t35) is operation_table_type
assert type(t35["a"]) is type(t3["a"])
assert type(t35["b"]) is type(t3["b"])
self.compare_dstack([t3, t5], t35)
with pytest.raises(TableMergeError):
table.dstack([t2, t3])
def test_dstack_different_length_table(self, operation_table_type):
self._setup(operation_table_type)
t2 = self.t2
t6 = self.t6
with pytest.raises(ValueError):
table.dstack([t2, t6])
def test_dstack_single_table(self):
self._setup(Table)
out = table.dstack(self.t1)
assert np.all(out == self.t1)
def test_dstack_representation(self):
rep1 = SphericalRepresentation([1, 2] * u.deg, [3, 4] * u.deg, 1 * u.kpc)
rep2 = SphericalRepresentation([10, 20] * u.deg, [30, 40] * u.deg, 10 * u.kpc)
t1 = Table([rep1])
t2 = Table([rep2])
t12 = table.dstack([t1, t2])
assert np.all(representation_equal(t12["col0"][:, 0], rep1))
assert np.all(representation_equal(t12["col0"][:, 1], rep2))
def test_dstack_skycoord(self):
sc1 = SkyCoord([1, 2] * u.deg, [3, 4] * u.deg)
sc2 = SkyCoord([10, 20] * u.deg, [30, 40] * u.deg)
t1 = Table([sc1])
t2 = Table([sc2])
t12 = table.dstack([t1, t2])
assert skycoord_equal(sc1, t12["col0"][:, 0])
assert skycoord_equal(sc2, t12["col0"][:, 1])
def test_dstack_structured_column(self):
"""Regression tests for gh-13271."""
# Two tables with matching names, including a structured column.
t1 = Table(
[
np.array([(1.0, 1), (2.0, 2)], dtype=[("f", "f8"), ("i", "i8")]),
["one", "two"],
],
names=["structured", "string"],
)
t2 = Table(
[
np.array([(3.0, 3), (4.0, 4)], dtype=[("f", "f8"), ("i", "i8")]),
["three", "four"],
],
names=["structured", "string"],
)
t12 = table.dstack([t1, t2])
assert t12.pformat() == (
[
"structured [f, i] string ",
"------------------ ------------",
"(1., 1) .. (3., 3) one .. three",
"(2., 2) .. (4., 4) two .. four",
]
if NUMPY_LT_2_0
else [
" structured [f, i] string ",
"-------------------- ------------",
"(1.0, 1) .. (3.0, 3) one .. three",
"(2.0, 2) .. (4.0, 4) two .. four",
]
)
# One table without the structured column.
t3 = t2[("string",)]
t13 = table.dstack([t1, t3])
assert t13.pformat() == [
"structured [f, i] string ",
"----------------- ------------",
" (1.0, 1) .. -- one .. three",
" (2.0, 2) .. -- two .. four",
]
| TestDStack |
python | patrick-kidger__equinox | equinox/internal/_closure_to_pytree.py | {
"start": 1327,
"end": 2020
} | class ____:
def __init__(self, fn: types.FunctionType):
self.fn = fn
def information(self):
try:
# `fn` not defined in REPL.
source = inspect.getsource(self.fn)
except OSError:
# `fn` defined in REPL. In practice this will lead to recompilations based
# on function identity, but correctness >> speed.
return self.fn
else:
return self.fn.__qualname__, self.fn.__module__, source
def __hash__(self):
return hash(self.information())
def __eq__(self, other):
return type(self) == type(other) and self.information() == other.information()
| _FunctionWithEquality |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_s3.py | {
"start": 7360,
"end": 10842
} | class ____:
def test_serialization(self):
"""
Asserts that the S3KeysUnchangedTrigger correctly serializes its arguments
and classpath.
"""
trigger = S3KeysUnchangedTrigger(
bucket_name="test_bucket",
prefix="test",
inactivity_period=1,
min_objects=1,
inactivity_seconds=0,
previous_objects=None,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.s3.S3KeysUnchangedTrigger"
assert kwargs == {
"bucket_name": "test_bucket",
"prefix": "test",
"inactivity_period": 1,
"min_objects": 1,
"inactivity_seconds": 0,
"previous_objects": set(),
"allow_delete": True,
"aws_conn_id": "aws_default",
"last_activity_time": None,
"hook_params": {},
"verify": None,
"region_name": None,
"botocore_config": None,
"polling_period_seconds": 0,
}
@pytest.mark.asyncio
@async_mock.patch("airflow.providers.amazon.aws.triggers.s3.S3Hook.get_async_conn")
async def test_run_wait(self, mock_client):
"""Test if the task is run in trigger successfully."""
mock_client.return_value.return_value.check_key.return_value = True
trigger = S3KeysUnchangedTrigger(bucket_name="test_bucket", prefix="test")
with mock_client:
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert task.done() is True
asyncio.get_event_loop().stop()
def test_run_raise_value_error(self):
"""
Test if the S3KeysUnchangedTrigger raises Value error for negative inactivity_period.
"""
with pytest.raises(ValueError, match="inactivity_period must be non-negative"):
S3KeysUnchangedTrigger(bucket_name="test_bucket", prefix="test", inactivity_period=-100)
@pytest.mark.asyncio
@async_mock.patch("airflow.providers.amazon.aws.triggers.s3.S3Hook.get_async_conn")
@async_mock.patch("airflow.providers.amazon.aws.triggers.s3.S3Hook.is_keys_unchanged_async")
async def test_run_success(self, mock_is_keys_unchanged, mock_client):
"""
Test if the task is run in triggerer successfully.
"""
mock_is_keys_unchanged.return_value = {"status": "success"}
trigger = S3KeysUnchangedTrigger(bucket_name="test_bucket", prefix="test")
generator = trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success"}) == actual
@pytest.mark.asyncio
@async_mock.patch("airflow.providers.amazon.aws.triggers.s3.S3Hook.get_async_conn")
@async_mock.patch("airflow.providers.amazon.aws.triggers.s3.S3Hook.is_keys_unchanged_async")
async def test_run_pending(self, mock_is_keys_unchanged, mock_client):
"""Test if the task is run in triggerer successfully."""
mock_is_keys_unchanged.return_value = {"status": "pending", "last_activity_time": datetime.now()}
trigger = S3KeysUnchangedTrigger(bucket_name="test_bucket", prefix="test")
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
| TestS3KeysUnchangedTrigger |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 365146,
"end": 365564
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node", "size")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(sgqlc.types.non_null("Language"), graphql_name="node")
size = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="size")
| LanguageEdge |
python | pytorch__pytorch | test/dynamo/test_minifier.py | {
"start": 413,
"end": 4150
} | class ____(MinifierTestBase):
# Test that compile, runtime, and accuracy errors after dynamo can be repro'd (both CPU and CUDA/XPU)
def _test_after_dynamo(self, device, backend, expected_error):
run_code = f"""\
@torch.compile(backend={backend!r})
def inner(x):
for _ in range(10):
x = torch.sin(x)
x = torch.relu(x)
for _ in range(10):
x = torch.cos(x)
return x
inner(torch.randn(20, 20).to("{device}"))
"""
self._run_full_test(run_code, "dynamo", expected_error, isolate=False)
def test_after_dynamo_cpu_compile_error(self):
self._test_after_dynamo(
"cpu", "relu_compile_error_TESTING_ONLY", "ReluCompileError"
)
def test_after_dynamo_cpu_runtime_error(self):
self._test_after_dynamo(
"cpu", "relu_runtime_error_TESTING_ONLY", "ReluRuntimeError"
)
def test_after_dynamo_cpu_accuracy_error(self):
self._test_after_dynamo(
"cpu", "relu_accuracy_error_TESTING_ONLY", "AccuracyError"
)
@requires_gpu
def test_after_dynamo_cuda_compile_error(self, device):
self._test_after_dynamo(
device, "relu_compile_error_TESTING_ONLY", "ReluCompileError"
)
@requires_gpu
def test_after_dynamo_cuda_runtime_error(self, device):
self._test_after_dynamo(
device, "relu_runtime_error_TESTING_ONLY", "ReluRuntimeError"
)
@requires_gpu
def test_after_dynamo_cuda_accuracy_error(self, device):
self._test_after_dynamo(
device, "relu_accuracy_error_TESTING_ONLY", "AccuracyError"
)
def test_after_dynamo_non_leaf_compile_error(self):
run_code = """\
@torch.compile(backend="non_leaf_compile_error_TESTING_ONLY")
def inner(x):
return x + 1
inner(torch.randn(20, 20, requires_grad=True) + 1)
"""
self._run_full_test(
run_code, "dynamo", "TestingOnlyCompileError", isolate=False
)
# Ensure that the testing backends pass when relu is not present.
def _test_after_dynamo_backend_passes(self, device, backend):
@torch.compile(backend=backend)
def inner(x):
for _ in range(10):
x = torch.sin(x)
for _ in range(10):
x = torch.cos(x)
return x
inner(torch.randn(20, 20).to(device))
def test_after_dynamo_cpu_compile_backend_passes(self):
self._test_after_dynamo_backend_passes("cpu", "relu_compile_error_TESTING_ONLY")
def test_after_dynamo_cpu_runtime_backend_passes(self):
self._test_after_dynamo_backend_passes("cpu", "relu_runtime_error_TESTING_ONLY")
def test_after_dynamo_cpu_accuracy_backend_passes(self):
self._test_after_dynamo_backend_passes(
"cpu", "relu_accuracy_error_TESTING_ONLY"
)
@requires_gpu
def test_after_dynamo_cuda_compile_backend_passes(self, device):
self._test_after_dynamo_backend_passes(
device, "relu_compile_error_TESTING_ONLY"
)
@requires_gpu
def test_after_dynamo_cuda_runtime_backend_passes(self, device):
self._test_after_dynamo_backend_passes(
device, "relu_runtime_error_TESTING_ONLY"
)
@requires_gpu
def test_after_dynamo_cuda_accuracy_backend_passes(self, device):
self._test_after_dynamo_backend_passes(
device, "relu_accuracy_error_TESTING_ONLY"
)
# Test that a module with mixed cpu/(cuda|xpu) parts with an error after dynamo can be repro'd
@skipIfNNModuleInlined()
@requires_gpu
def test_cpu_cuda_module_after_dynamo(self, device):
backend_name = "relu_compile_error_TESTING_ONLY"
run_code = f"""\
| MinifierTests |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_standalone.py | {
"start": 6979,
"end": 11060
} | class ____:
def test_return_type(self) -> None:
plot1 = figure()
plot1.scatter([], [])
plot2 = figure()
plot2.scatter([], [])
# This is a testing artefact, users don't have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = bes.components(plot1)
assert len(r) == 2
_, divs0 = bes.components((plot1, plot2))
assert isinstance(divs0, tuple)
_, divs1 = bes.components([plot1, plot2])
assert isinstance(divs1, tuple)
_, divs2 = bes.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs2, dict)
assert all(isinstance(x, str) for x in divs2.keys())
# explicit test for OrderedDict (don't replace with dict)
_, divs3 = bes.components(OrderedDict([("Plot 1", plot1), ("Plot 2", plot2)]))
assert isinstance(divs3, OrderedDict)
assert all(isinstance(x, str) for x in divs3.keys())
@patch('bokeh.embed.util.make_globally_unique_css_safe_id', new_callable=lambda: stable_id)
@patch('bokeh.embed.util.make_globally_unique_id', new_callable=lambda: stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(self, mock_make_css_safe_id: MagicMock, mock_make_id: MagicMock) -> None:
doc = Document()
plot1 = figure()
plot1.scatter([], [])
doc.add_root(plot1)
plot2 = figure()
plot2.scatter([], [])
doc.add_root(plot2)
expected_plotdict_1 = RenderRoot(elementid=ID("ID"), id=ID("ID"))
expected_plotdict_2 = RenderRoot(elementid=ID("ID"), id=ID("ID"))
_, plotdict = bes.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = bes.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = bes.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
def test_result_attrs(self, test_plot: figure) -> None:
bs4 = pytest.importorskip("bs4")
script, _ = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.find_all(name='script')
assert len(scripts) == 1
assert scripts[0].attrs == {'type': 'text/javascript'}
@patch('bokeh.embed.util.make_globally_unique_css_safe_id', new=stable_id)
@patch('bokeh.embed.util.make_globally_unique_id', new=stable_id)
def test_div_attrs(self, test_plot: figure) -> None:
bs4 = pytest.importorskip("bs4")
_, div = bes.components(test_plot)
html = bs4.BeautifulSoup(div, "html.parser")
els = html.find_all(name='div')
assert len(els) == 1
el = els[0]
assert set(el.attrs) == {"data-root-id", "id", "style"}
assert el.attrs["id"] == "ID"
assert el.attrs["data-root-id"] == test_plot.id
assert el.attrs["style"] == "display: contents;"
assert el.string is None
def test_script_is_utf8_encoded(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
assert isinstance(script, str)
def test_quoting(self, test_plot: figure) -> None:
script, _ = bes.components(test_plot)
assert "&quot;" not in script
assert "'foo'" not in script
assert "&#x27;foo&#x27;" in script
def test_output_is_without_script_tag_when_wrap_script_is_false(self, test_plot: figure) -> None:
bs4 = pytest.importorskip("bs4")
script, _ = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.find_all(name='script')
assert len(scripts) == 1
# XXX: this needs to account for indentation
#script_content = scripts[0].getText()
#rawscript, div = bes.components(test_plot, wrap_script=False)
#self.maxDiff = None
#assert rawscript.strip() == script_content.strip()
| Test_components |
python | django__django | django/contrib/gis/forms/widgets.py | {
"start": 2291,
"end": 3088
} | class ____(BaseGeometryWidget):
base_layer = "nasaWorldview"
template_name = "gis/openlayers.html"
map_srid = 3857
class Media:
css = {
"all": (
"https://cdn.jsdelivr.net/npm/ol@v7.2.2/ol.css",
"gis/css/ol3.css",
)
}
js = (
"https://cdn.jsdelivr.net/npm/ol@v7.2.2/dist/ol.js",
"gis/js/OLMapWidget.js",
)
def serialize(self, value):
return value.json if value else ""
def deserialize(self, value):
geom = super().deserialize(value)
# GeoJSON assumes WGS84 (4326). Use the map's SRID instead.
if geom and json_regex.match(value) and self.map_srid != 4326:
geom.srid = self.map_srid
return geom
| OpenLayersWidget |
python | PrefectHQ__prefect | tests/_internal/pydantic/test_validated_func.py | {
"start": 12356,
"end": 16376
} | class ____(BaseModel):
name: str = Field(..., description="Test name")
value: int = 42
""",
namespace,
)
TestModel = namespace["TestModel"]
# Define a function with the model as a parameter using string annotation
# This simulates what happens with `from __future__ import annotations`
def process_model(model: "TestModel") -> dict: # noqa: F821
return {"name": model.name, "value": model.value}
# Update the function's globals to include the TestModel
process_model.__globals__.update(namespace)
# Create validated function
vf = ValidatedFunction(process_model)
# Create an instance of the model
test_instance = TestModel(name="test")
# This should work without raising PydanticUserError about undefined models
result = vf.validate_call_args((test_instance,), {})
assert isinstance(result["model"], TestModel)
assert result["model"].name == "test"
assert result["model"].value == 42
def test_nested_pydantic_models_with_forward_refs(self):
"""Test nested Pydantic models with forward references work correctly."""
class Inner(BaseModel):
value: int
class Outer(BaseModel):
inner: Inner
name: str
# Simulate forward reference by using string annotation
def process_nested(data: "Outer") -> str: # noqa: F821
return data.name
# Add the types to the function's globals
process_nested.__globals__["Outer"] = Outer
process_nested.__globals__["Inner"] = Inner
vf = ValidatedFunction(process_nested)
# Create nested structure
outer_instance = Outer(inner=Inner(value=42), name="test")
result = vf.validate_call_args((outer_instance,), {})
assert isinstance(result["data"], Outer)
assert result["data"].name == "test"
assert result["data"].inner.value == 42
def test_no_rebuild_without_forward_refs(self):
"""Test that model_rebuild is not called when there are no forward references.
This is a performance optimization test - we should avoid the overhead
of model_rebuild() when it's not necessary.
"""
class MyModel(BaseModel):
name: str
# Function with concrete type annotations (no forward refs)
def process_data(model: MyModel, count: int = 0) -> dict:
return {"name": model.name, "count": count}
# Spy on model_rebuild to ensure it's NOT called during initialization
with patch.object(BaseModel, "model_rebuild") as mock_rebuild:
vf = ValidatedFunction(process_data)
# model_rebuild should NOT have been called since there are no forward refs
mock_rebuild.assert_not_called()
# The model should work correctly without rebuild
instance = MyModel(name="test")
# Also verify model_rebuild is NOT called during validation
with patch.object(vf.model, "model_rebuild") as mock_rebuild:
result = vf.validate_call_args((instance,), {"count": 5})
# model_rebuild should NOT be called during validation either
mock_rebuild.assert_not_called()
assert isinstance(result["model"], MyModel)
assert result["model"].name == "test"
assert result["count"] == 5
def test_forward_ref_defined_after_decorator(self):
"""Test that forward references work when type is defined after the function.
This is a regression test for issue #19447.
When using `from __future__ import annotations`, the @flow decorator
was failing if a forward-referenced Pydantic model was defined after
the function using it.
"""
# First, define A and the function WITHOUT B defined yet
namespace = {}
exec(
"""
from __future__ import annotations
from pydantic import BaseModel, Field
| TestModel |
python | getsentry__sentry | tests/sentry/integrations/github/test_issues.py | {
"start": 1244,
"end": 3680
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.organization = self.create_organization(owner=self.user)
ten_days = timezone.now() + datetime.timedelta(days=10)
self.integration = self.create_integration(
organization=self.organization,
provider="github",
external_id="github_external_id",
name="getsentry",
metadata={
"access_token": "some-token",
"expires_at": ten_days.strftime("%Y-%m-%dT%H:%M:%S"),
},
)
install = self.integration.get_installation(self.organization.id)
self.install = cast(GitHubIntegration, install)
@fixture(autouse=True)
def stub_get_jwt(self):
with patch.object(client, "get_jwt", return_value="jwt_token_1"):
yield
@responses.activate
def test_get_create_issue_config_without_group(self) -> None:
responses.add(
responses.GET,
"https://api.github.com/installation/repositories",
json={
"total_count": 2,
"repositories": [
{"full_name": "getsentry/sentry", "name": "sentry"},
{"full_name": "getsentry/other", "name": "other", "archived": True},
],
},
)
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/assignees",
json=[{"login": "leeandher"}],
)
responses.add(
responses.GET,
"https://api.github.com/repos/getsentry/sentry/labels",
json=[
{"name": "bug"},
{"name": "not-bug"},
],
)
install = self.install
config = install.get_create_issue_config(None, self.user, params={})
[repo_field, assignee_field, label_field] = config
assert repo_field["name"] == "repo"
assert repo_field["type"] == "select"
assert repo_field["label"] == "GitHub Repository"
assert assignee_field["name"] == "assignee"
assert assignee_field["type"] == "select"
assert assignee_field["label"] == "Assignee"
assert label_field["name"] == "labels"
assert label_field["type"] == "select"
assert label_field["label"] == "Labels"
| GitHubIssueBasicAllSiloTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/defs_state/base.py | {
"start": 1207,
"end": 4294
} | class ____(ABC, MayHaveInstanceWeakref[T_DagsterInstance]):
"""Interface for a state store that can be used to store and retrieve state for a given defs key.
Also contains a contextually supplied singleton instance of the StateStore, which can be set
using the `set` class method. This is used to ensure a StateStore is available to code
that is loading definitions.
"""
def get_latest_version(self, key: str) -> Optional[str]:
"""Returns the saved state version for the given defs key, if it exists.
Args:
key (str): The key of the state to retrieve.
Returns:
Optional[str]: The saved state version for the given key, if it exists.
"""
info = self.get_latest_defs_state_info()
return info.get_version(key) if info else None
@abstractmethod
def get_latest_defs_state_info(self) -> Optional[DefsStateInfo]:
"""Returns the saved state version for all defs keys.
Returns:
Optional[DefsStateInfo]: The saved state version info for all defs keys, if available.
"""
raise NotImplementedError()
@abstractmethod
def download_state_to_path(self, key: str, version: str, path: Path) -> None:
"""Loads the state file for the given defs key and version into the given file path.
Args:
key (str): The key of the state to retrieve.
version (str): The version of the state to retrieve.
path (Path): The path to write the state to.
"""
raise NotImplementedError()
@abstractmethod
def upload_state_from_path(self, key: str, version: str, path: Path) -> None:
"""Uploads the state stored at `path` to persistent storage.
Args:
key (str): The key of the state to persist.
version (str): The version of the state to persist.
path (Path): The path to the state to persist.
"""
raise NotImplementedError()
@abstractmethod
def set_latest_version(self, key: str, version: str) -> None:
"""Sets the latest version of the state for the given key.
Args:
key (str): The key of the state to persist.
version (str): The version of the state to persist.
"""
raise NotImplementedError()
def _sanitize_key(self, key: str) -> str:
return re.sub(r"[^A-Za-z0-9._-]", "__", key)
def _get_version_key(self, key: str) -> str:
"""Returns a storage key under which the latest version of a given key's state is stored."""
return f"__version__/{key}"
def _get_state_key(self, key: str, version: str) -> str:
"""Returns a storage key under which a given key's state at a given version is stored."""
return f"__state__/{key}/{version}"
@classmethod
def get(cls) -> Optional["DefsStateStorage"]:
"""Get the current StateStorage, if it has been set."""
return _current_storage.get()
| DefsStateStorage |
python | coleifer__peewee | tests/keys.py | {
"start": 13392,
"end": 14259
} | class ____(ModelTestCase):
requires = [User, Note]
def setUp(self):
super(TestForeignKeyConstraints, self).setUp()
self.set_foreign_key_pragma(True)
def tearDown(self):
self.set_foreign_key_pragma(False)
super(TestForeignKeyConstraints, self).tearDown()
def set_foreign_key_pragma(self, is_enabled):
if IS_SQLITE:
self.database.foreign_keys = 'on' if is_enabled else 'off'
def test_constraint_exists(self):
max_id = User.select(fn.MAX(User.id)).scalar() or 0
with self.assertRaisesCtx(IntegrityError):
with self.database.atomic():
Note.create(user=max_id + 1, content='test')
@requires_sqlite
def test_disable_constraint(self):
self.set_foreign_key_pragma(False)
Note.create(user=0, content='test')
| TestForeignKeyConstraints |
python | python-visualization__folium | folium/template.py | {
"start": 1195,
"end": 1371
} | class ____(jinja2.Environment):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters["tojavascript"] = tojavascript
| Environment |
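The row above shows a custom filter being registered on a jinja2.Environment subclass. A minimal, self-contained sketch of the same registration pattern (the filter name and body here are illustrative only, not taken from folium):
import jinja2

def shout(value: str) -> str:
    # hypothetical filter: upper-case the value and append an exclamation mark
    return str(value).upper() + "!"

env = jinja2.Environment()
env.filters["shout"] = shout  # same pattern as filters["tojavascript"] above
print(env.from_string("{{ 'hello' | shout }}").render())  # -> HELLO!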
python | pypa__setuptools | setuptools/tests/integration/helpers.py | {
"start": 1081,
"end": 2688
} | class ____:
"""Compatibility layer for ZipFile/Info and TarFile/Info"""
def __init__(self, filename) -> None:
self._filename = filename
if filename.endswith("tar.gz"):
self._obj: tarfile.TarFile | ZipFile = tarfile.open(filename, "r:gz")
elif filename.endswith("zip"):
self._obj = ZipFile(filename)
else:
raise ValueError(f"{filename} doesn't seem to be a zip or tar.gz")
def __iter__(self) -> Iterator[ZipInfo] | Iterator[tarfile.TarInfo]:
if hasattr(self._obj, "infolist"):
return iter(self._obj.infolist())
return iter(self._obj)
def get_name(self, zip_or_tar_info):
if hasattr(zip_or_tar_info, "filename"):
return zip_or_tar_info.filename
return zip_or_tar_info.name
def get_content(self, zip_or_tar_info):
if hasattr(self._obj, "extractfile"):
content = self._obj.extractfile(zip_or_tar_info)
if content is None:
msg = f"Invalid {zip_or_tar_info.name} in {self._filename}"
raise ValueError(msg)
return str(content.read(), "utf-8")
return str(self._obj.read(zip_or_tar_info), "utf-8")
def get_sdist_members(sdist_path):
with tarfile.open(sdist_path, "r:gz") as tar:
files = [Path(f) for f in tar.getnames()]
# remove root folder
relative_files = ("/".join(f.parts[1:]) for f in files)
return {f for f in relative_files if f}
def get_wheel_members(wheel_path):
with ZipFile(wheel_path) as zipfile:
return set(zipfile.namelist())
| Archive |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/ec2.py | {
"start": 5654,
"end": 10164
} | class ____(AwsBaseOperator[EC2Hook]):
"""
Create and start a specified number of EC2 Instances using boto3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EC2CreateInstanceOperator`
:param image_id: ID of the AMI used to create the instance.
:param max_count: Maximum number of instances to launch. Defaults to 1.
:param min_count: Minimum number of instances to launch. Defaults to 1.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param poll_interval: Number of seconds to wait before attempting to
check state of instance. Only used if wait_for_completion is True. Default is 20.
:param max_attempts: Maximum number of attempts when checking state of instance.
Only used if wait_for_completion is True. Default is 20.
:param config: Dictionary for arbitrary parameters to the boto3 run_instances call.
:param wait_for_completion: If True, the operator will wait for the instance to be
in the `running` state before returning.
"""
aws_hook_class = EC2Hook
operator_extra_links = (EC2InstanceDashboardLink(),)
template_fields: Sequence[str] = aws_template_fields(
"image_id",
"max_count",
"min_count",
"aws_conn_id",
"region_name",
"config",
"wait_for_completion",
)
def __init__(
self,
image_id: str,
max_count: int = 1,
min_count: int = 1,
poll_interval: int = 20,
max_attempts: int = 20,
config: dict | None = None,
wait_for_completion: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.image_id = image_id
self.max_count = max_count
self.min_count = min_count
self.poll_interval = poll_interval
self.max_attempts = max_attempts
self.config = config or {}
self.wait_for_completion = wait_for_completion
@property
def _hook_parameters(self) -> dict[str, Any]:
return {**super()._hook_parameters, "api_type": "client_type"}
def execute(self, context: Context):
instances = self.hook.conn.run_instances(
ImageId=self.image_id,
MinCount=self.min_count,
MaxCount=self.max_count,
**self.config,
)["Instances"]
instance_ids = self._on_kill_instance_ids = [instance["InstanceId"] for instance in instances]
# Console link is for EC2 dashboard list, not individual instances when more than 1 instance
EC2InstanceDashboardLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
instance_ids=EC2InstanceDashboardLink.format_instance_id_filter(instance_ids),
)
for instance_id in instance_ids:
self.log.info("Created EC2 instance %s", instance_id)
if self.wait_for_completion:
self.hook.get_waiter("instance_running").wait(
InstanceIds=[instance_id],
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempts,
},
)
# leave "_on_kill_instance_ids" in place for finishing post-processing
return instance_ids
def on_kill(self) -> None:
instance_ids = getattr(self, "_on_kill_instance_ids", [])
if instance_ids:
self.log.info("on_kill: Terminating instance/s %s", ", ".join(instance_ids))
""" ec2_hook = EC2Hook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
api_type="client_type",
) """
self.hook.terminate_instances(instance_ids=instance_ids)
super().on_kill()
| EC2CreateInstanceOperator |
python | run-llama__llama_index | llama-index-core/llama_index/core/evaluation/benchmarks/hotpotqa.py | {
"start": 682,
"end": 4609
} | class ____:
"""
Refer to https://hotpotqa.github.io/ for more details on the dataset.
"""
def _download_datasets(self) -> Dict[str, str]:
cache_dir = get_cache_dir()
dataset_paths = {}
dataset = "hotpot_dev_distractor"
dataset_full_path = os.path.join(cache_dir, "datasets", "HotpotQA")
if not os.path.exists(dataset_full_path):
url = DEV_DISTRACTOR_URL
try:
os.makedirs(dataset_full_path, exist_ok=True)
save_file = open(
os.path.join(dataset_full_path, "dev_distractor.json"), "wb"
)
response = requests.get(url, stream=True)
# Define the size of each chunk
chunk_size = 1024
# Loop over the chunks and parse the JSON data
for chunk in tqdm.tqdm(response.iter_content(chunk_size=chunk_size)):
if chunk:
save_file.write(chunk)
except Exception as e:
if os.path.exists(dataset_full_path):
print(
"Dataset:", dataset, "not found at:", url, "Removing cached dir"
)
rmtree(dataset_full_path)
raise ValueError(f"could not download {dataset} dataset") from e
dataset_paths[dataset] = os.path.join(dataset_full_path, "dev_distractor.json")
print("Dataset:", dataset, "downloaded at:", dataset_full_path)
return dataset_paths
def run(
self,
query_engine: BaseQueryEngine,
queries: int = 10,
queries_fraction: Optional[float] = None,
show_result: bool = False,
) -> None:
dataset_paths = self._download_datasets()
dataset = "hotpot_dev_distractor"
dataset_path = dataset_paths[dataset]
print("Evaluating on dataset:", dataset)
print("-------------------------------------")
f = open(dataset_path)
query_objects = json.loads(f.read())
if queries_fraction:
queries_to_load = int(len(query_objects) * queries_fraction)
else:
queries_to_load = queries
queries_fraction = round(queries / len(query_objects), 5)
print(
f"Loading {queries_to_load} queries out of \
{len(query_objects)} (fraction: {queries_fraction})"
)
query_objects = query_objects[:queries_to_load]
assert isinstance(query_engine, RetrieverQueryEngine), (
"query_engine must be a RetrieverQueryEngine for this evaluation"
)
retriever = HotpotQARetriever(query_objects)
# Mock the query engine with a retriever
query_engine = query_engine.with_retriever(retriever=retriever)
scores = {"exact_match": 0.0, "f1": 0.0}
for query in query_objects:
query_bundle = QueryBundle(
query_str=query["question"]
+ " Give a short factoid answer (as few words as possible).",
custom_embedding_strs=[query["question"]],
)
response = query_engine.query(query_bundle)
em = int(
exact_match_score(
prediction=str(response), ground_truth=query["answer"]
)
)
f1, _, _ = f1_score(prediction=str(response), ground_truth=query["answer"])
scores["exact_match"] += em
scores["f1"] += f1
if show_result:
print("Question: ", query["question"])
print("Response:", response)
print("Correct answer: ", query["answer"])
print("EM:", em, "F1:", f1)
print("-------------------------------------")
for score in scores:
scores[score] /= len(query_objects)
print("Scores: ", scores)
| HotpotQAEvaluator |
python | pyinstaller__pyinstaller | tests/functional/scripts/pyi_multiprocessing_queue.py | {
"start": 544,
"end": 1593
} | class ____(multiprocessing.Process):
def __init__(self, queue):
multiprocessing.Process.__init__(self)
self.queue = queue
def run(self):
print('SendEventProcess: begin')
self.queue.put((1, 2))
print('SendEventProcess: end')
def main(start_method):
# Set start method
multiprocessing.set_start_method(start_method)
# Create a queue, and run a subprocess that fills it with data
print('Main: begin')
queue = multiprocessing.Queue()
process = SendEventProcess(queue)
process.start()
results = queue.get()
print(f'Main: retrieved results: {results}')
assert results == (1, 2)
process.join()
print('Main: end')
# Ensure process finished successfully
assert process.exitcode == 0, f"Process exited with non-success code {process.exitcode}!"
if __name__ == '__main__':
multiprocessing.freeze_support()
if len(sys.argv) != 2:
print(f"Usage: {sys.argv[0]} <start-method>")
sys.exit(1)
main(sys.argv[1])
| SendEventProcess |
python | django__django | tests/responses/test_cookie.py | {
"start": 5274,
"end": 6779
} | class ____(SimpleTestCase):
def test_default(self):
response = HttpResponse()
response.delete_cookie("c")
cookie = response.cookies["c"]
self.assertEqual(cookie["expires"], "Thu, 01 Jan 1970 00:00:00 GMT")
self.assertEqual(cookie["max-age"], 0)
self.assertEqual(cookie["path"], "/")
self.assertEqual(cookie["secure"], "")
self.assertEqual(cookie["domain"], "")
self.assertEqual(cookie["samesite"], "")
def test_delete_cookie_secure_prefix(self):
"""
delete_cookie() sets the secure flag if the cookie name starts with
__Host- or __Secure- (without that, browsers ignore cookies with those
prefixes).
"""
response = HttpResponse()
for prefix in ("Secure", "Host"):
with self.subTest(prefix=prefix):
cookie_name = "__%s-c" % prefix
response.delete_cookie(cookie_name)
self.assertIs(response.cookies[cookie_name]["secure"], True)
def test_delete_cookie_secure_samesite_none(self):
# delete_cookie() sets the secure flag if samesite='none'.
response = HttpResponse()
response.delete_cookie("c", samesite="none")
self.assertIs(response.cookies["c"]["secure"], True)
def test_delete_cookie_samesite(self):
response = HttpResponse()
response.delete_cookie("c", samesite="lax")
self.assertEqual(response.cookies["c"]["samesite"], "lax")
| DeleteCookieTests |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 64236,
"end": 70875
} | class ____(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = ops.convert_to_tensor(x)
else:
x_tensor = x
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
with self.cached_session():
return self.evaluate(y)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError), err_msg):
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
# Multiple assertions could fail, but the evaluation order is arbitrary.
# Match against generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"inner 3 dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (
([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], r"height must be >= target \+ offset"),
([0, 2, 3, 3], r"width must be >= target \+ offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
def testNameScope(self):
# Testing name scope requires a graph.
with ops.Graph().as_default():
image = array_ops.placeholder(dtypes.float32, shape=[55, 66, 3])
y = image_ops.crop_to_bounding_box(image, 0, 0, 55, 66)
self.assertTrue(y.name.startswith("crop_to_bounding_box"))
| CropToBoundingBoxTest |
python | getsentry__sentry | src/sentry/replays/usecases/summarize.py | {
"start": 1196,
"end": 22674
} | class ____(TypedDict):
id: str
title: str
message: str
timestamp: float # this should be in milliseconds
category: str
@sentry_sdk.trace
def fetch_error_details(project_id: int, error_ids: list[str]) -> list[EventDict]:
"""Fetch error details given error IDs and return a list of EventDict objects."""
try:
if not error_ids:
return []
node_ids = [Event.generate_node_id(project_id, event_id=id) for id in error_ids]
events = nodestore.backend.get_multi(node_ids)
return [
EventDict(
category="error",
id=event_id,
title=data.get("title", ""),
timestamp=data.get("timestamp") * 1000, # convert to milliseconds
message=data.get("message", ""),
)
for event_id, data in zip(error_ids, events.values())
if data is not None and data.get("timestamp") is not None
]
except Exception as e:
sentry_sdk.capture_exception(e)
return []
def _parse_iso_timestamp_to_ms(timestamp: str | None) -> float:
"""
Parses a nullable ISO timestamp to float milliseconds. Errors default to 0.
"""
if not timestamp:
return 0.0
try:
dt = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
return dt.timestamp() * 1000
except (ValueError, AttributeError):
return 0.0
@sentry_sdk.trace
def fetch_trace_connected_errors(
project: Project,
trace_ids: list[str],
limit: int,
start: datetime,
end: datetime,
) -> list[EventDict]:
"""Fetch same-trace events from both errors and issuePlatform datasets."""
if not trace_ids:
return []
# Get projects in the organization that the user has access to
org_projects = list(
Project.objects.filter(organization=project.organization, status=ObjectStatus.ACTIVE)
)
snuba_params = SnubaParams(
projects=org_projects,
start=start,
end=end,
organization=project.organization,
)
trace_ids_query = f"trace:[{','.join(trace_ids)}]"
# Query for errors dataset
try:
error_query_results = query_trace_connected_events(
dataset_label="errors",
selected_columns=[
"id",
"timestamp_ms",
"timestamp",
"title",
"message",
],
query=trace_ids_query,
snuba_params=snuba_params,
orderby=["-timestamp"],
limit=limit,
referrer=Referrer.API_REPLAY_SUMMARIZE_BREADCRUMBS.value,
)
except Exception as e:
sentry_sdk.capture_exception(e)
error_query_results = {"data": []}
# Query for issuePlatform dataset
try:
issue_query_results = query_trace_connected_events(
dataset_label="issuePlatform",
selected_columns=[
"event_id",
"title",
"subtitle",
"timestamp",
"timestamp_ms",
"occurrence_type_id",
],
query=trace_ids_query,
snuba_params=snuba_params,
orderby=["-timestamp"],
limit=limit,
referrer=Referrer.API_REPLAY_SUMMARIZE_BREADCRUMBS.value,
)
except Exception as e:
sentry_sdk.capture_exception(e)
issue_query_results = {"data": []}
# Process results and convert to EventDict objects
events = []
# Process error query results
for event in error_query_results["data"]:
timestamp = _parse_iso_timestamp_to_ms(
event.get("timestamp_ms")
) or _parse_iso_timestamp_to_ms(event.get("timestamp"))
message = event.get("message", "")
if timestamp:
events.append(
EventDict(
category="error",
id=event.get("id"),
title=event.get("title", ""),
timestamp=timestamp,
message=message,
)
)
# Process issuePlatform query results
for event in issue_query_results["data"]:
timestamp = _parse_iso_timestamp_to_ms(
event.get("timestamp_ms")
) or _parse_iso_timestamp_to_ms(event.get("timestamp"))
message = event.get("subtitle", "") or event.get("message", "")
if event.get("occurrence_type_id") == FeedbackGroup.type_id:
category = "feedback"
else:
category = "error"
# NOTE: The issuePlatform dataset query can return feedback.
# We also fetch feedback from nodestore in fetch_feedback_details
# for feedback breadcrumbs.
# We avoid creating duplicate feedback logs
# by filtering for unique feedback IDs during log generation.
if timestamp:
events.append(
EventDict(
category=category,
id=event.get("event_id"),
title=event.get("title", ""),
timestamp=timestamp,
message=message,
)
)
return events
@sentry_sdk.trace
def fetch_feedback_details(feedback_id: str | None, project_id) -> EventDict | None:
"""
Fetch user feedback associated with a specific feedback event ID.
"""
if feedback_id is None:
return None
try:
node_id = Event.generate_node_id(project_id, event_id=feedback_id)
event = nodestore.backend.get(node_id)
return (
EventDict(
category="feedback",
id=feedback_id,
title="User Feedback",
timestamp=event.get("timestamp") * 1000, # convert to milliseconds
message=event.get("contexts", {}).get("feedback", {}).get("message", ""),
)
if event and event.get("timestamp") is not None
else None
)
except Exception as e:
sentry_sdk.capture_exception(e)
return None
def generate_error_log_message(error: EventDict) -> str:
title = error["title"]
message = error["message"]
timestamp = float(error["timestamp"])
return f"User experienced an error: '{title}: {message}' at {timestamp}"
def generate_feedback_log_message(feedback: EventDict) -> str:
message = feedback["message"]
timestamp = float(feedback["timestamp"])
return f"User submitted feedback: '{message}' at {timestamp}"
@sentry_sdk.trace
def get_summary_logs(
segment_data: Iterator[tuple[int, memoryview]],
error_events: list[EventDict],
project_id: int,
is_mobile_replay: bool = False,
replay_start: str | None = None,
) -> list[str]:
# Sort error events by timestamp. This list includes all feedback events still.
error_events.sort(key=lambda x: x["timestamp"])
return list(
generate_summary_logs(
segment_data,
error_events,
project_id,
is_mobile_replay=is_mobile_replay,
replay_start=replay_start,
)
)
def generate_summary_logs(
segment_data: Iterator[tuple[int, memoryview]],
error_events: list[EventDict],
project_id,
is_mobile_replay: bool = False,
replay_start: str | None = None,
) -> Generator[str]:
"""
Generate log messages from events and errors in chronological order.
Avoid processing duplicate feedback events.
"""
error_idx = 0
seen_feedback_ids = {error["id"] for error in error_events if error["category"] == "feedback"}
replay_start_ms = _parse_iso_timestamp_to_ms(replay_start) if replay_start else 0.0
# Skip errors that occurred before replay start
while error_idx < len(error_events) and error_events[error_idx]["timestamp"] < replay_start_ms:
error_idx += 1
# Process segments
for _, segment in segment_data:
events = json.loads(segment.tobytes().decode("utf-8"))
for event in events:
event_type = which(event)
timestamp = get_replay_event_timestamp_ms(event, event_type)
if timestamp < replay_start_ms:
continue
# Check if we need to yield any error messages that occurred before this event
while (
error_idx < len(error_events) and error_events[error_idx]["timestamp"] < timestamp
):
error = error_events[error_idx]
if error["category"] == "error":
yield generate_error_log_message(error)
elif error["category"] == "feedback":
yield generate_feedback_log_message(error)
error_idx += 1
# Yield the current event's log message
if event_type == EventType.FEEDBACK:
feedback_id = event["data"]["payload"].get("data", {}).get("feedbackId")
# Filter out duplicate feedback events.
if feedback_id not in seen_feedback_ids:
feedback = fetch_feedback_details(feedback_id, project_id)
if feedback:
yield generate_feedback_log_message(feedback)
elif message := as_log_message(event, is_mobile_replay):
yield message
# Yield any remaining error messages
while error_idx < len(error_events):
error = error_events[error_idx]
if error["category"] == "error":
yield generate_error_log_message(error)
elif error["category"] == "feedback":
yield generate_feedback_log_message(error)
error_idx += 1
def as_log_message(event: dict[str, Any], is_mobile_replay: bool = False) -> str | None:
"""Return an event as a log message.
Useful in AI contexts where the event's structure is an impediment to the AI's understanding
of the interaction log. Not every event produces a log message. This function is overly coupled
to the AI use case. In later iterations, if more or all log messages are desired, this function
should be forked.
"""
event_type = which(event)
timestamp = get_replay_event_timestamp_ms(event, event_type)
trunc_length = 200 # used for CONSOLE logs and RESOURCE_* urls.
try:
match event_type:
case EventType.CLICK:
message = event["data"]["payload"]["message"]
return f"User clicked on {message} at {timestamp}"
case EventType.DEAD_CLICK:
message = event["data"]["payload"]["message"]
return f"User clicked on {message} but the triggered action was slow to complete at {timestamp}"
case EventType.RAGE_CLICK:
message = event["data"]["payload"]["message"]
return f"User rage clicked on {message} but the triggered action was slow to complete at {timestamp}"
case EventType.NAVIGATION_SPAN:
# for web replays, we favor NAVIGATION_SPAN
# since the frontend favors navigation span events in the breadcrumb tab
# for mobile replays, we only have access to NAVIGATION events.
if not is_mobile_replay:
to = event["data"]["payload"]["description"]
return f"User navigated to: {to} at {timestamp}"
else:
return None
case EventType.CONSOLE:
message = str(event["data"]["payload"]["message"])
if len(message) > trunc_length:
message = message[:trunc_length] + " [truncated]"
return f"Logged: '{message}' at {timestamp}"
case EventType.RESOURCE_FETCH:
payload = event["data"]["payload"]
method = payload["data"]["method"]
status_code = payload["data"]["statusCode"]
description = payload["description"]
# Format URL
url = _parse_url(description, trunc_length)
# Check if the tuple is valid and response size exists
sizes_tuple = parse_network_content_lengths(event)
response_size = None
if sizes_tuple and sizes_tuple[1] is not None:
response_size = str(sizes_tuple[1])
# Skip successful requests
if status_code and str(status_code).startswith("2"):
return None
if response_size is None:
return (
f'Fetch request "{method} {url}" failed with {status_code} at {timestamp}'
)
else:
return f'Fetch request "{method} {url}" failed with {status_code} ({response_size} bytes) at {timestamp}'
case EventType.RESOURCE_XHR:
payload = event["data"]["payload"]
method = payload["data"]["method"]
status_code = payload["data"]["statusCode"]
description = payload["description"]
# Format URL
url = _parse_url(description, trunc_length)
# Check if the tuple is valid and response size exists
sizes_tuple = parse_network_content_lengths(event)
response_size = None
if sizes_tuple and sizes_tuple[1] is not None:
response_size = str(sizes_tuple[1])
# Skip successful requests
if status_code and str(status_code).startswith("2"):
return None
if response_size is None:
return f'XHR request "{method} {url}" failed with {status_code} at {timestamp}'
else:
return f'XHR request "{method} {url}" failed with {status_code} ({response_size} bytes) at {timestamp}'
case EventType.LCP:
duration = event["data"]["payload"]["data"]["size"]
rating = event["data"]["payload"]["data"]["rating"]
return f"Application largest contentful paint: {duration} ms and has a {rating} rating at {timestamp}"
case EventType.HYDRATION_ERROR:
return f"There was a hydration error on the page at {timestamp}"
case EventType.TAP:
message = event["data"]["payload"].get("message")
if message:
return f"User tapped on {message} at {timestamp}"
else:
return None
case EventType.DEVICE_BATTERY:
charging = event["data"]["payload"]["data"]["charging"]
level = event["data"]["payload"]["data"]["level"]
return f"Device battery was {level}% and {'charging' if charging else 'not charging'} at {timestamp}"
case EventType.DEVICE_ORIENTATION:
position = event["data"]["payload"]["data"]["position"]
return f"Device orientation was changed to {position} at {timestamp}"
case EventType.DEVICE_CONNECTIVITY:
state = event["data"]["payload"]["data"]["state"]
return f"Device connectivity was changed to {state} at {timestamp}"
case EventType.SCROLL:
view_id = event["data"]["payload"]["data"].get("view.id", "")
direction = event["data"]["payload"]["data"].get("direction", "")
return f"User scrolled {view_id} {direction} at {timestamp}"
case EventType.SWIPE:
view_id = event["data"]["payload"]["data"].get("view.id", "")
direction = event["data"]["payload"]["data"].get("direction", "")
return f"User swiped {view_id} {direction} at {timestamp}"
case EventType.BACKGROUND:
return f"User moved the app to the background at {timestamp}"
case EventType.FOREGROUND:
return f"User moved the app to the foreground at {timestamp}"
case EventType.MUTATIONS:
return None
case EventType.UNKNOWN:
return None
case EventType.CANVAS:
return None
case EventType.OPTIONS:
return None
case EventType.MEMORY:
return None
case EventType.FEEDBACK:
return None # the log message is processed before this method is called
case EventType.SLOW_CLICK:
return None
case EventType.UI_BLUR:
return None
case EventType.UI_FOCUS:
return None
case EventType.RESOURCE_IMAGE:
return None
case EventType.RESOURCE_SCRIPT:
return None
case EventType.CLS:
return None
case EventType.NAVIGATION:
if is_mobile_replay:
to = event["data"]["payload"]["data"]["to"]
return f"User navigated to: {to} at {timestamp}"
else:
return None
case EventType.MULTI_CLICK:
return None
except (KeyError, ValueError, TypeError):
logger.exception(
"Error parsing event in replay AI summary",
extra={
"event": json.dumps(event),
},
)
return None
def _parse_url(s: str, trunc_length: int) -> str:
"""
Attempt to validate and return a formatted URL from a string (netloc/path?query).
If validation fails, return the raw string truncated to trunc_length.
"""
try:
parsed_url = urlparse(s)
if parsed_url.netloc:
path = parsed_url.path.lstrip("/")
url = f"{parsed_url.netloc}/{path}"
if parsed_url.query:
url += f"?{parsed_url.query}"
return url
except ValueError:
pass
if len(s) > trunc_length:
return s[:trunc_length] + " [truncated]"
return s
@sentry_sdk.trace
def rpc_get_replay_summary_logs(
project_id: int,
replay_id: str,
num_segments: int,
) -> dict[str, Any]:
"""
RPC call for Seer. Downloads a replay's segment data, queries associated errors, and parses this into summary logs.
"""
project = Project.objects.get(id=project_id)
# Look for the replay in the last 90 days.
start, end = default_start_end_dates()
# Fetch the replay's error and trace IDs from the replay_id, as well as the start and end times.
snuba_response = query_replay_instance(
project_id=project.id,
replay_id=replay_id,
start=start,
end=end,
organization=project.organization,
request_user_id=None, # This is for the viewed_by_me field which is unused for summaries.
)
processed_response = process_raw_response(
snuba_response,
fields=[], # Defaults to all fields.
)
# 404s should be handled in the originating Sentry endpoint.
# If the replay is missing here just return an empty response.
if not processed_response:
return {"logs": []}
error_ids = processed_response[0].get("error_ids", [])
trace_ids = processed_response[0].get("trace_ids", [])
platform = processed_response[0].get("platform")
is_mobile_replay = platform in MOBILE if platform else False
# Use the replay's start and end times to clamp the error queries. Fuzz 10s for clockskew.
replay_start = processed_response[0].get("started_at")
replay_end = processed_response[0].get("finished_at")
if replay_start:
start = max(
datetime.fromisoformat(replay_start) - timedelta(seconds=10),
datetime.now(UTC) - timedelta(days=90),
)
if replay_end:
end = min(datetime.fromisoformat(replay_end) + timedelta(seconds=10), datetime.now(UTC))
# Fetch same-trace errors.
trace_connected_errors = fetch_trace_connected_errors(
project=project,
trace_ids=trace_ids,
start=start,
end=end,
limit=100,
)
trace_connected_error_ids = {x["id"] for x in trace_connected_errors}
# Fetch directly linked errors, if they weren't returned by the trace query.
direct_errors = fetch_error_details(
project_id=project.id,
error_ids=[x for x in error_ids if x not in trace_connected_error_ids],
)
error_events = direct_errors + trace_connected_errors
# Metric names kept for backwards compatibility.
metrics.distribution(
"replays.endpoints.project_replay_summary.direct_errors",
value=len(direct_errors),
)
metrics.distribution(
"replays.endpoints.project_replay_summary.trace_connected_errors",
value=len(trace_connected_errors),
)
metrics.distribution(
"replays.endpoints.project_replay_summary.num_trace_ids",
value=len(trace_ids),
)
# Download segment data.
segment_md = fetch_segments_metadata(project.id, replay_id, 0, num_segments)
segment_data = iter_segment_data(segment_md)
# Combine replay and error data and parse into logs.
logs = get_summary_logs(
segment_data,
error_events,
project.id,
is_mobile_replay=is_mobile_replay,
replay_start=replay_start,
)
return {"logs": logs}
| EventDict |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec20.py | {
"start": 250,
"end": 746
} | class ____(Generic[T, P1]):
f: Callable[P1, int]
x: T
def x1(x: X[int, P2]) -> str: ...
def x2(x: X[int, Concatenate[int, P2]]) -> str: ...
def X3(x: X[int, [int, bool]]) -> str: ...
def x4(x: X[int, ...]) -> str: ...
# This should generate an error because "int" can't be bound to a ParamSpec.
def x5(x: X[int, int]) -> str: ...
# This should generate an error.
def x6(x: X[..., ...]) -> str: ...
# This should generate an error.
def x7(x: X[[int], [int, int]]) -> str: ...
| X |
python | doocs__leetcode | solution/0200-0299/0281.Zigzag Iterator/Solution.py | {
"start": 0,
"end": 840
} | class ____:
def __init__(self, v1: List[int], v2: List[int]):
self.cur = 0
self.size = 2
self.indexes = [0] * self.size
self.vectors = [v1, v2]
def next(self) -> int:
vector = self.vectors[self.cur]
index = self.indexes[self.cur]
res = vector[index]
self.indexes[self.cur] = index + 1
self.cur = (self.cur + 1) % self.size
return res
def hasNext(self) -> bool:
start = self.cur
while self.indexes[self.cur] == len(self.vectors[self.cur]):
self.cur = (self.cur + 1) % self.size
if self.cur == start:
return False
return True
# Your ZigzagIterator object will be instantiated and called as such:
# i, v = ZigzagIterator(v1, v2), []
# while i.hasNext(): v.append(i.next())
| ZigzagIterator |
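A short worked usage of the iterator above (values chosen for illustration): the two vectors are interleaved, and once the shorter one is exhausted the remaining elements of the longer one are yielded in order.
i, v = ZigzagIterator([1, 2], [3, 4, 5, 6]), []
while i.hasNext():
    v.append(i.next())
assert v == [1, 3, 2, 4, 5, 6]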
python | xlwings__xlwings | xlwings/conversion/standard.py | {
"start": 3917,
"end": 4826
} | class ____:
def __init__(self, options):
self.ndim = options.get("ndim", None)
def __call__(self, c):
# the assumption is that value is 2-dimensional at this stage
if self.ndim is None:
if len(c.value) == 1:
c.value = c.value[0][0] if len(c.value[0]) == 1 else c.value[0]
elif len(c.value[0]) == 1:
c.value = [x[0] for x in c.value]
else:
c.value = c.value
elif self.ndim == 1:
if len(c.value) == 1:
c.value = c.value[0]
elif len(c.value[0]) == 1:
c.value = [x[0] for x in c.value]
else:
raise Exception("Range must be 1-by-n or n-by-1 when ndim=1.")
# ndim = 2 is a no-op
elif self.ndim != 2:
raise ValueError("Invalid c.value ndim=%s" % self.ndim)
| AdjustDimensionsStage |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/pydantic/types.py | {
"start": 17092,
"end": 17344
} | class ____(SecretField[str]):
_error_kind = 'string_type'
@classmethod
def _pre_core_schema(cls) -> core_schema.CoreSchema:
return core_schema.str_schema()
def _display(self) -> str:
return secret_display(self)
| SecretStr |
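For context, a minimal sketch of how this type behaves from the caller's side (standard pydantic v2 usage, not part of the vendored file above): the rendered value is masked, and the raw value is only reachable through get_secret_value().
from pydantic import BaseModel, SecretStr

class Credentials(BaseModel):
    password: SecretStr

creds = Credentials(password="hunter2")
print(creds.password)                     # prints **********
print(creds.password.get_secret_value())  # prints hunter2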
python | spack__spack | lib/spack/spack/package.py | {
"start": 5634,
"end": 15937
} | class ____:
debug = _tty.debug
error = _tty.error
info = _tty.info
msg = _tty.msg
warn = _tty.warn
def is_system_path(path: str) -> bool:
"""Returns :obj:`True` iff the argument is a system path.
.. deprecated:: v2.0
"""
warnings.warn(
"spack.package.is_system_path is deprecated", category=SpackAPIWarning, stacklevel=2
)
return _is_system_path(path)
def filter_system_paths(paths: Iterable[str]) -> List[str]:
"""Returns a copy of the input where system paths are filtered out.
.. deprecated:: v2.0
"""
warnings.warn(
"spack.package.filter_system_paths is deprecated", category=SpackAPIWarning, stacklevel=2
)
return _filter_system_paths(paths)
#: Assigning this to :attr:`spack.package_base.PackageBase.flag_handler` means that compiler flags
#: are passed to the build system. This can be used in any package that derives from a build system
#: class that implements :meth:`spack.package_base.PackageBase.flags_to_build_system_args`.
#:
#: See also :func:`env_flags` and :func:`inject_flags`.
#:
#: Example::
#:
#: from spack.package import *
#:
#: class MyPackage(CMakePackage):
#: flag_handler = build_system_flags
build_system_flags = PackageBase.build_system_flags
#: Assigning this to :attr:`spack.package_base.PackageBase.flag_handler` means that compiler flags
#: are set as canonical environment variables.
#:
#: See also :func:`build_system_flags` and :func:`inject_flags`.
#:
#: Example::
#:
#: from spack.package import *
#:
#: class MyPackage(MakefilePackage):
#: flag_handler = env_flags
env_flags = PackageBase.env_flags
#: This is the default value of :attr:`spack.package_base.PackageBase.flag_handler`, which tells
#: Spack to inject compiler flags through the compiler wrappers, which means that the build system
#: will not see them directly. This is typically a good default, but in rare case you may need to
#: use :func:`env_flags` or :func:`build_system_flags` instead.
#:
#: See also :func:`build_system_flags` and :func:`env_flags`.
#:
#: Example::
#:
#: from spack.package import *
#:
#: class MyPackage(MakefilePackage):
#: flag_handler = inject_flags
inject_flags = PackageBase.inject_flags
api: Dict[str, Tuple[str, ...]] = {
"v2.0": (
"BaseBuilder",
"Builder",
"Dict",
"EnvironmentModifications",
"Executable",
"FileFilter",
"FileList",
"HeaderList",
"InstallError",
"LibraryList",
"List",
"MakeExecutable",
"NoHeadersError",
"NoLibrariesError",
"Optional",
"PackageBase",
"Prefix",
"ProcessError",
"SkipTest",
"Spec",
"Version",
"all_deptypes",
"ancestor",
"any_combination_of",
"auto_or_any_combination_of",
"bash_completion_path",
"build_system_flags",
"build_system",
"cache_extra_test_sources",
"can_access",
"can_splice",
"cd",
"change_sed_delimiter",
"check_outputs",
"conditional",
"conflicts",
"copy_tree",
"copy",
"default_args",
"depends_on",
"determine_number_of_jobs",
"disjoint_sets",
"env_flags",
"env",
"extends",
"filter_compiler_wrappers",
"filter_file",
"find_all_headers",
"find_first",
"find_headers",
"find_libraries",
"find_required_file",
"find_system_libraries",
"find",
"fish_completion_path",
"fix_darwin_install_name",
"force_remove",
"force_symlink",
"get_escaped_text_output",
"inject_flags",
"install_test_root",
"install_tree",
"install",
"is_exe",
"join_path",
"keep_modification_time",
"library_extensions",
"license",
"maintainers",
"makedirs",
"mkdir",
"mkdirp",
"move",
"on_package_attributes",
"patch",
"provides",
"pwd",
"redistribute",
"register_builder",
"remove_directory_contents",
"remove_linked_tree",
"remove",
"removedirs",
"rename",
"requires",
"resource",
"rmtree",
"run_after",
"run_before",
"set_executable",
"set_install_permissions",
"symlink",
"test_part",
"touch",
"tty",
"variant",
"ver",
"version",
"when",
"which_string",
"which",
"working_dir",
"zsh_completion_path",
),
"v2.1": ("CompilerError", "SpackError"),
"v2.2": (
"BuilderWithDefaults",
"ClassProperty",
"CompilerPropertyDetector",
"GenericBuilder",
"HKEY",
"LC_ID_DYLIB",
"LinkTree",
"MachO",
"ModuleChangePropagator",
"Package",
"WindowsRegistryView",
"apply_macos_rpath_fixups",
"classproperty",
"compare_output_file",
"compare_output",
"compile_c_and_execute",
"compiler_spec",
"create_builder",
"dedupe",
"delete_needed_from_elf",
"delete_rpath",
"environment_modifications_for_specs",
"execute_install_time_tests",
"filter_shebang",
"filter_system_paths",
"find_all_libraries",
"find_compilers",
"get_cmake_prefix_path",
"get_effective_jobs",
"get_elf_compat",
"get_path_args_from_module_line",
"get_user",
"has_shebang",
"host_platform",
"is_system_path",
"join_url",
"kernel_version",
"libc_from_dynamic_linker",
"macos_version",
"make_package_test_rpath",
"memoized",
"microarchitecture_flags_from_target",
"microarchitecture_flags",
"module_command",
"parse_dynamic_linker",
"parse_elf",
"path_contains_subdirectory",
"readlink",
"safe_remove",
"sbang_install_path",
"sbang_shebang_line",
"set_env",
"shared_library_suffix",
"spack_script",
"static_library_suffix",
"substitute_version_in_url",
"windows_sfn",
),
}
# Splatting does not work for static analysis tools.
__all__ = [
# v2.0
"BaseBuilder",
"Builder",
"Dict",
"EnvironmentModifications",
"Executable",
"FileFilter",
"FileList",
"HeaderList",
"InstallError",
"LibraryList",
"List",
"MakeExecutable",
"NoHeadersError",
"NoLibrariesError",
"Optional",
"PackageBase",
"Prefix",
"ProcessError",
"SkipTest",
"Spec",
"Version",
"all_deptypes",
"ancestor",
"any_combination_of",
"auto_or_any_combination_of",
"bash_completion_path",
"build_system_flags",
"build_system",
"cache_extra_test_sources",
"can_access",
"can_splice",
"cd",
"change_sed_delimiter",
"check_outputs",
"conditional",
"conflicts",
"copy_tree",
"copy",
"default_args",
"depends_on",
"determine_number_of_jobs",
"disjoint_sets",
"env_flags",
"env",
"extends",
"filter_compiler_wrappers",
"filter_file",
"find_all_headers",
"find_first",
"find_headers",
"find_libraries",
"find_required_file",
"find_system_libraries",
"find",
"fish_completion_path",
"fix_darwin_install_name",
"force_remove",
"force_symlink",
"get_escaped_text_output",
"inject_flags",
"install_test_root",
"install_tree",
"install",
"is_exe",
"join_path",
"keep_modification_time",
"library_extensions",
"license",
"maintainers",
"makedirs",
"mkdir",
"mkdirp",
"move",
"on_package_attributes",
"patch",
"provides",
"pwd",
"redistribute",
"register_builder",
"remove_directory_contents",
"remove_linked_tree",
"remove",
"removedirs",
"rename",
"requires",
"resource",
"rmtree",
"run_after",
"run_before",
"set_executable",
"set_install_permissions",
"symlink",
"test_part",
"touch",
"tty",
"variant",
"ver",
"version",
"when",
"which_string",
"which",
"working_dir",
"zsh_completion_path",
# v2.1
"CompilerError",
"SpackError",
# v2.2
"BuilderWithDefaults",
"ClassProperty",
"CompilerPropertyDetector",
"GenericBuilder",
"HKEY",
"LC_ID_DYLIB",
"LinkTree",
"MachO",
"ModuleChangePropagator",
"Package",
"WindowsRegistryView",
"apply_macos_rpath_fixups",
"classproperty",
"compare_output_file",
"compare_output",
"compile_c_and_execute",
"compiler_spec",
"create_builder",
"dedupe",
"delete_needed_from_elf",
"delete_rpath",
"environment_modifications_for_specs",
"execute_install_time_tests",
"filter_shebang",
"filter_system_paths",
"find_all_libraries",
"find_compilers",
"get_cmake_prefix_path",
"get_effective_jobs",
"get_elf_compat",
"get_path_args_from_module_line",
"get_user",
"has_shebang",
"host_platform",
"is_system_path",
"join_url",
"kernel_version",
"libc_from_dynamic_linker",
"macos_version",
"make_package_test_rpath",
"memoized",
"microarchitecture_flags_from_target",
"microarchitecture_flags",
"module_command",
"parse_dynamic_linker",
"parse_elf",
"path_contains_subdirectory",
"readlink",
"safe_remove",
"sbang_install_path",
"sbang_shebang_line",
"set_env",
"shared_library_suffix",
"spack_script",
"static_library_suffix",
"substitute_version_in_url",
"windows_sfn",
]
# These are just here for editor support; they may be set when the build env is set up.
configure: Executable
make_jobs: int
make: MakeExecutable
nmake: Executable
ninja: MakeExecutable
python_include: str
python_platlib: str
python_purelib: str
python: Executable
spack_cc: str
spack_cxx: str
spack_f77: str
spack_fc: str
prefix: Prefix
dso_suffix: str
| tty |
python | getsentry__sentry | src/sentry/remote_subscriptions/consumers/result_consumer.py | {
"start": 1247,
"end": 3765
} | class ____(abc.ABC, Generic[T, U]):
def __init__(self, use_subscription_lock: bool = False):
self.use_subscription_lock = use_subscription_lock
@property
@abc.abstractmethod
def subscription_model(self) -> type[U]:
pass
def __call__(self, identifier: str, result: T):
with metrics.timer(
"remote_subscriptions.result_consumer.call_timing",
tags={"identifier": identifier},
):
try:
# TODO: Handle subscription not existing - we should remove the subscription from
# the remote system in that case.
with sentry_sdk.start_transaction(
name=f"monitors.{identifier}.result_consumer.ResultProcessor",
op="result_processor.handle_result",
):
subscription = self.get_subscription(result)
if self.use_subscription_lock and subscription:
lock = locks.get(
f"subscription:{subscription.type}:{subscription.subscription_id}",
duration=10,
name=f"subscription_{identifier}",
)
with TimedRetryPolicy(10)(lock.acquire):
with metrics.timer(
"remote_subscriptions.result_consumer.handle_result_timing",
tags={"identifier": identifier},
):
self.handle_result(subscription, result)
else:
with metrics.timer(
"remote_subscriptions.result_consumer.handle_result_timing",
tags={"identifier": identifier},
):
self.handle_result(subscription, result)
except Exception:
logger.exception("Failed to process message result")
def get_subscription(self, result: T) -> U | None:
try:
return self.subscription_model.objects.get_from_cache(
subscription_id=self.get_subscription_id(result)
)
except self.subscription_model.DoesNotExist:
return None
@abc.abstractmethod
def get_subscription_id(self, result: T) -> str:
pass
@abc.abstractmethod
def handle_result(self, subscription: U | None, result: T):
pass
| ResultProcessor |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 7046,
"end": 7113
} | class ____(SyntaxException):
"""Invalid pragma"""
| PragmaException |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 25328,
"end": 27671
} | class ____(CondCases, TestCase):
def test_basic_nonsvd(self):
# Smoketest the non-svd norms
A = array([[1.0, 0, 1], [0, -2.0, 0], [0, 0, 3.0]])
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2 / 3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, "fro"), np.sqrt(265 / 12))
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, "fro"]
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
@skip(reason="NP_VER: fails on CI") # (
# True, run=False, reason="Platform/LAPACK-dependent failure, see gh-18914"
# )
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, "fro"]
p_pos = [None, 1, 2, "fro"]
A = np.ones((2, 2))
A[0, 1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float64))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1, 0, 1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0, 0] = 0
A[1, 1] = 0
for p in (None, 1, 2, "fro", -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0, 0], np.inf)
assert_equal(c[1, 1], np.inf)
assert_(np.isfinite(c[0, 1]))
assert_(np.isfinite(c[1, 0]))
| TestCond |
python | kubernetes-client__python | kubernetes/client/exceptions.py | {
"start": 383,
"end": 1558
} | class ____(OpenApiException, TypeError):
def __init__(self, msg, path_to_item=None, valid_classes=None,
key_type=None):
""" Raises an exception for TypeErrors
Args:
msg (str): the exception message
Keyword Args:
path_to_item (list): a list of keys and indices to get to the
current_item
None if unset
valid_classes (tuple): the primitive classes that current item
should be an instance of
None if unset
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
None if unset
"""
self.path_to_item = path_to_item
self.valid_classes = valid_classes
self.key_type = key_type
full_msg = msg
if path_to_item:
full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
super(ApiTypeError, self).__init__(full_msg)
| ApiTypeError |
python | huggingface__transformers | tests/utils/test_masking_utils.py | {
"start": 2764,
"end": 17069
} | class ____(unittest.TestCase):
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_packed_sequence_mask_sdpa(self):
config = LlamaConfig()
config._attn_implementation = "sdpa"
batch_size = 2
sequence_length = 10
cache_position = torch.arange(sequence_length)
# First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
causal_mask = create_causal_mask(
config=config,
# we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
input_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
attention_mask=None,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
self.assertTrue((causal_mask == EXPECTED_PACKED_MASK).all())
def test_packed_sequence_mask_eager(self):
config = LlamaConfig()
config._attn_implementation = "eager"
batch_size = 2
sequence_length = 10
cache_position = torch.arange(sequence_length)
# First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
causal_mask = create_causal_mask(
config=config,
# we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
input_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
attention_mask=None,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
min_dtype = torch.finfo(torch.float16).min
self.assertTrue((causal_mask == torch.where(EXPECTED_PACKED_MASK, 0.0, min_dtype)).all())
def test_packed_sequence_mask_flex_attention(self):
config = LlamaConfig()
config._attn_implementation = "flex_attention"
batch_size = 2
sequence_length = 10
cache_position = torch.arange(sequence_length)
# First batch has 3 packed sequences of 4, 2 and 4 tokens respectively, second has 2 of 6 and 4 tokens
position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
causal_mask = create_causal_mask(
config=config,
# we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
input_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
attention_mask=None,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
def dummy_mask_mod(b, h, q, kv):
return EXPECTED_PACKED_MASK[b, h, q, kv]
EXPECTED_BLOCK_MASK = create_block_mask(dummy_mask_mod, 2, None, 10, 10, device="cpu")
# We compare the str representations, as the BlockMask objects themselves cannot easily be compared
self.assertEqual(causal_mask.to_string(), EXPECTED_BLOCK_MASK.to_string())
def test_find_packed_sequence_indices(self):
position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
EXPECTED_SEQUENCE_INDICES = torch.tensor([[0, 0, 0, 0, 1, 1, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]])
self.assertTrue((find_packed_sequence_indices(position_ids) == EXPECTED_SEQUENCE_INDICES).all())
def test_nonpacked_sequence_mask_skip(self):
config = LlamaConfig()
config._attn_implementation = "sdpa"
batch_size = 2
sequence_length = 10
cache_position = torch.arange(sequence_length)
# Non-packed sequences
position_ids = torch.arange(sequence_length)[None, :]
causal_mask = create_causal_mask(
config=config,
# we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
input_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
attention_mask=None,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
# packed sequence should be skipped
self.assertTrue(causal_mask is None)
create_causal_mask_compiled = torch.compile(create_causal_mask, mode="reduce-overhead")
causal_mask = create_causal_mask_compiled(
config=config,
# we only need batch size, seq_length and dtype here - we don't care about the values of the embeddings
input_embeds=torch.empty((batch_size, sequence_length), dtype=torch.float16),
attention_mask=None,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
# cannot be skipped under compile, should result into a triu mask
self.assertTrue(torch.equal(~torch.ones(*causal_mask.shape).triu(diagonal=1).bool(), causal_mask))
def test_chunked_mask_with_left_padding_and_large_prefill(self):
# Make sure we have an attention_chunk_size in the config
config = LlamaConfig(attention_chunk_size=3, attn_implementation="sdpa")
batch_size = 2
sequence_length = 8
pad_tokens = 4
input_ids = torch.randint(100, 200, (batch_size, sequence_length))
attention_mask = torch.tensor(
[[0 if i < pad_tokens else 1 for i in range(sequence_length)], [1] * sequence_length]
)
inputs_embeds = torch.empty_like(input_ids, dtype=torch.float16)
cache_position = torch.arange(sequence_length)
position_ids = torch.empty(batch_size, sequence_length, dtype=cache_position.dtype)
position_ids[0, :pad_tokens] = 1
position_ids[0, pad_tokens:] = torch.arange(sequence_length - pad_tokens)
position_ids[1, :] = cache_position
chunked_attention_mask = create_chunked_causal_mask(
config=config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=None,
position_ids=position_ids,
)
# fmt: off
EXPECTED_CHUNKED_MASK = torch.tensor(
# Here, for the padded sequence, the chunks should start correctly at index 4 (otherwise, with 4 padding
# tokens and chunk_size=3, the first chunk would span indices 0-2, then 3-6, if we didn't account for the padding)
[[[[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, True, False, False, False],
[False, False, False, False, True, True, False, False],
[False, False, False, False, True, True, True, False],
[False, False, False, False, False, False, False, True]]],
[[[ True, False, False, False, False, False, False, False],
[ True, True, False, False, False, False, False, False],
[ True, True, True, False, False, False, False, False],
[False, False, False, True, False, False, False, False],
[False, False, False, True, True, False, False, False],
[False, False, False, True, True, True, False, False],
[False, False, False, False, False, False, True, False],
[False, False, False, False, False, False, True, True]]]],
dtype=torch.bool)
# fmt: on
self.assertTrue((chunked_attention_mask == EXPECTED_CHUNKED_MASK).all())
def test_chunked_mask_with_left_padding_decoding(self):
# Make sure we have an attention_chunk_size in the config
config = LlamaConfig(attention_chunk_size=4, attn_implementation="sdpa", num_hidden_layers=1)
cache = DynamicCache(config=config)
# Sanity check
self.assertEqual(len(cache), 1)
self.assertTrue(isinstance(cache.layers[0], DynamicSlidingWindowLayer))
# Fill-in the Cache (sequence length is bigger than chunk size here)
batch_size = 2
prefill_size = 8
pad_tokens = 7
fake_kv = torch.rand(batch_size, 32, prefill_size, 32)
cache.update(fake_kv, fake_kv, 0, torch.arange(prefill_size))
# Create a new input after the prefill
input_ids = torch.randint(100, 200, (batch_size, 1))
attention_mask = torch.tensor(
[[0 if i < pad_tokens else 1 for i in range(prefill_size + 1)], [1] * (prefill_size + 1)]
)
inputs_embeds = torch.empty_like(input_ids, dtype=torch.float16)
cache_position = torch.tensor([prefill_size], dtype=int)
position_ids = torch.tensor([[prefill_size - pad_tokens], [prefill_size]])
chunked_attention_mask = create_chunked_causal_mask(
config=config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=cache,
position_ids=position_ids,
)
# To better understand the following expected mask, here is the full 2d mask, where the "|" characters are the chunk
# separators (where the tokens should stop seeing each other)
# [0, 0, 0, 0, 0, 0, 0, | 1, 1], -> due to left padding, the first chunk only starts after the padding tokens
# [| 1, 1, 1, 1, | 1, 1, 1, 1, | 1]]) -> easy case, each 4 tokens is a new chunk
# fmt: off
EXPECTED_CHUNKED_MASK = torch.tensor(
# Here, for the padded sequence, the chunks should start correctly at index 7 (the first unpadded
# index), and so only indices 7 and 8 should be True
[[[[False, False, True, True]]],
# Here, for the unpadded sequence, the chunks start at index 0. Since we have 9 tokens in total, the last
# token (index 8) will only see itself (we have 2 full chunks before)
[[[False, False, False, True]]]],
dtype=torch.bool)
# fmt: on
self.assertTrue((chunked_attention_mask == EXPECTED_CHUNKED_MASK).all())
@staticmethod
def _run_bidirectional_mask(mask_fn, attn_implementation):
def run_mask_creation(mask_fn, config, input_embeds, encoder_mask, cross_mask, encoder_hidden_states):
encoder_attn_mask = mask_fn(
config=config,
input_embeds=input_embeds,
attention_mask=encoder_mask,
)
cross_attn_mask = mask_fn(
config=config,
input_embeds=input_embeds,
attention_mask=cross_mask,
encoder_hidden_states=encoder_hidden_states,
)
return encoder_attn_mask, cross_attn_mask
# We use llama but could be also bert/bart --> we only need the `_attn_implementation` here
config = LlamaConfig()
config._attn_implementation = attn_implementation
# Meta data
batch_size = 2
q_length = 10
kv_length = 5
input_embeds = torch.ones((batch_size, q_length, 1), device=torch_device, dtype=torch.float16)
encoder_hidden_states = torch.ones((batch_size, kv_length, 1), device=torch_device, dtype=torch.float16)
encoder_mask = torch.ones_like(input_embeds)[..., 0]
cross_mask = torch.ones_like(encoder_hidden_states)[..., 0]
# Case 1: Full mask
full_mask_encoder_1, full_mask_cross_1 = run_mask_creation(
mask_fn=mask_fn,
config=config,
input_embeds=input_embeds,
encoder_mask=encoder_mask,
cross_mask=cross_mask,
encoder_hidden_states=encoder_hidden_states,
)
full_mask_encoder_2, full_mask_cross_2 = run_mask_creation(
mask_fn=mask_fn,
config=config,
input_embeds=input_embeds,
encoder_mask=None,
cross_mask=None,
encoder_hidden_states=encoder_hidden_states,
)
# Case 2: Padding involved
cross_mask[:, -1] = 0
encoder_mask[:, -1] = 0
padded_mask_encoder, padded_mask_cross = run_mask_creation(
mask_fn=mask_fn,
config=config,
input_embeds=input_embeds,
encoder_mask=encoder_mask,
cross_mask=cross_mask,
encoder_hidden_states=encoder_hidden_states,
)
full_masks = (full_mask_encoder_1, full_mask_encoder_2), (full_mask_cross_1, full_mask_cross_2)
padded_masks = (padded_mask_encoder, padded_mask_cross)
return full_masks, padded_masks
def test_bidirectional_mask_cudagraphs(self):
"""
Checks whether the bidirectional mask creation is compatible with cuda graphs, i.e. we do not run into any error
during this test.
"""
mask_creation_function = torch.compile(create_bidirectional_mask, mode="reduce-overhead")
self._run_bidirectional_mask(mask_fn=mask_creation_function, attn_implementation="sdpa")
def test_bidirectional_mask_skip_eager(self):
"""
Checks whether the bidirectional mask creation can skip the mask creation if we have a full mask.
"""
full_masks, padded_mask = self._run_bidirectional_mask(
mask_fn=create_bidirectional_mask, attn_implementation="eager"
)
for alternative_masks in full_masks:
self.assertTrue(alternative_masks[0] is None)
self.assertTrue(alternative_masks[1] is None)
self.assertTrue(padded_mask[0] is not None)
self.assertTrue(padded_mask[1] is not None)
| MaskTest |
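The tests above derive packed-sequence boundaries from resets in position_ids. As a minimal, hypothetical sketch (not the transformers implementation), the block-diagonal causal mask checked against EXPECTED_PACKED_MASK can be rebuilt directly from position_ids:

import torch

def packed_causal_mask(position_ids: torch.Tensor) -> torch.Tensor:
    # A new packed sequence starts wherever position_ids does not increase by exactly 1
    resets = torch.cat(
        [torch.zeros_like(position_ids[:, :1], dtype=torch.bool), position_ids.diff(dim=-1) != 1],
        dim=-1,
    )
    seq_idx = resets.long().cumsum(dim=-1)                 # e.g. [0, 0, 0, 0, 1, 1, 2, 2, 2, 2]
    same_seq = seq_idx[:, :, None] == seq_idx[:, None, :]  # tokens attend only within their own sequence
    causal = torch.ones(position_ids.shape[-1], position_ids.shape[-1], dtype=torch.bool).tril()
    return (same_seq & causal)[:, None]                    # add a broadcastable head dimension

position_ids = torch.tensor([[0, 1, 2, 3, 0, 1, 0, 1, 2, 3], [0, 1, 2, 3, 4, 5, 0, 1, 2, 3]])
mask = packed_causal_mask(position_ids)                    # shape (2, 1, 10, 10)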
python | getsentry__sentry | src/sentry/integrations/bitbucket/uninstalled.py | {
"start": 861,
"end": 2371
} | class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
@csrf_exempt
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase:
return super().dispatch(request, *args, **kwargs)
def post(self, request: Request, *args, **kwargs) -> Response:
try:
token = request.META["HTTP_AUTHORIZATION"].split(" ", 1)[1]
except (KeyError, IndexError):
return self.respond(status=400)
try:
rpc_integration = get_integration_from_jwt(
token,
request.path,
IntegrationProviderSlug.BITBUCKET.value,
request.GET,
method="POST",
)
except AtlassianConnectValidationError:
return self.respond(status=400)
integration = Integration.objects.get(id=rpc_integration.id)
integration.update(status=ObjectStatus.DISABLED)
org_integrations = integration_service.get_organization_integrations(
integration_id=integration.id
)
for oi in org_integrations:
repository_service.disable_repositories_for_integration(
organization_id=oi.organization_id,
integration_id=integration.id,
provider="integrations:bitbucket",
)
return self.respond()
| BitbucketUninstalledEndpoint |
python | pytorch__pytorch | test/test_jit.py | {
"start": 570134,
"end": 570188
} | class ____(JitTestCase):
pass
| TestJitGeneratedModule |
python | crytic__slither | slither/core/cfg/scope.py | {
"start": 211,
"end": 483
} | class ____:
def __init__(
self, is_checked: bool, is_yul: bool, parent_scope: Union["Scope", "Function"]
) -> None:
self.nodes: List["Node"] = []
self.is_checked = is_checked
self.is_yul = is_yul
self.father = parent_scope
| Scope |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_test.py | {
"start": 28428,
"end": 31932
} | class ____(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
@test_util.disable_xla("b/183567451: XLA doesn't yet support int4")
def testFillInt4(self):
np_ans = np.array([[-6] * 3] * 2).astype(dtypes_lib.int4.as_numpy_dtype)
self._compareAll([2, 3], np_ans[0][0], np_ans)
np_ans = np.array([[11] * 3] * 2).astype(dtypes_lib.uint4.as_numpy_dtype)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex128(self):
np_ans = np.array([[0.15 + 0.3j] * 3] * 2).astype(np.complex128)
self._compareAll([2, 3], np_ans[0][0], np_ans)
@test_util.run_deprecated_v1
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.session(use_gpu=False):
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testFillNegative(self):
with self.cached_session():
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(ValueError):
array_ops.fill(shape, 7)
# Using a placeholder so this won't be caught in static analysis.
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(ValueError):
array_ops.fill([3, 2], [1.0, 2.0])
# Partial dimension information.
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)
| FillTest |
python | google__jax | tests/sparse_test.py | {
"start": 25058,
"end": 31510
} | class ____(sptu.SparseTestCase):
@jtu.sample_product(has_aux=[True, False])
def test_sparse_value_and_grad(self, has_aux):
rng_sparse = sptu.rand_sparse(self.rng())
rng = jtu.rand_default(self.rng())
y = rng(5, "float32")
X = rng_sparse((10, 5), "float32")
Xsp = sparse.BCOO.fromdense(X)
def f(X, y):
if has_aux:
return jnp.sum(X @ y), {'X': X.shape, 'y': y.shape}
return jnp.sum(X @ y)
with self.subTest("wrt sparse"):
val_de, grad_de = jax.value_and_grad(f, argnums=0, has_aux=has_aux)(X, y)
val_sp, grad_sp = sparse.value_and_grad(f, argnums=0, has_aux=has_aux)(Xsp, y)
self.assertIsInstance(grad_sp, sparse.BCOO)
self.assertAllClose(val_de, val_sp)
self.assertAllClose(grad_sp.data, sparse_bcoo._bcoo_extract(grad_sp.indices, grad_de))
with self.subTest("wrt dense"):
self.assertAllClose(jax.value_and_grad(f, argnums=1, has_aux=has_aux)(X, y),
sparse.value_and_grad(f, argnums=1, has_aux=has_aux)(Xsp, y))
@jtu.sample_product(has_aux=[True, False])
def test_sparse_grad(self, has_aux):
rng_sparse = sptu.rand_sparse(self.rng())
rng = jtu.rand_default(self.rng())
y = rng(5, "float32")
X = rng_sparse((10, 5), "float32")
Xsp = sparse.BCOO.fromdense(X)
def f(X, y):
if has_aux:
return jnp.sum(X @ y), {'X': X.shape, 'y': y.shape}
return jnp.sum(X @ y)
with self.subTest("wrt sparse"):
grad_de = jax.grad(f, argnums=0, has_aux=has_aux)(X, y)
grad_sp = sparse.grad(f, argnums=0, has_aux=has_aux)(Xsp, y)
if has_aux:
grad_de, aux_de = grad_de
grad_sp, aux_sp = grad_sp
self.assertAllClose(aux_de, aux_sp)
self.assertIsInstance(grad_sp, sparse.BCOO)
self.assertAllClose(grad_sp.data, sparse_bcoo._bcoo_extract(grad_sp.indices, grad_de))
with self.subTest("wrt dense"):
self.assertAllClose(jax.grad(f, argnums=1, has_aux=has_aux)(X, y),
sparse.grad(f, argnums=1, has_aux=has_aux)(Xsp, y))
@jtu.sample_product(
has_aux=[True, False],
transform=['jacrev', 'jacfwd', 'jacobian']
)
@jax.default_matmul_precision("float32")
def test_sparse_jacobian(self, has_aux, transform):
jac_dense = getattr(jax, transform)
jac_sparse = getattr(sparse, transform)
rng_sparse = sptu.rand_sparse(self.rng())
rng = jtu.rand_default(self.rng())
y = rng(5, "float32")
X = rng_sparse((10, 5), "float32")
Xsp = sparse.BCOO.fromdense(X)
def f(X, y):
if has_aux:
return X @ y, {'X': X.shape, 'y': y.shape}
return X @ y
with self.subTest("wrt sparse"):
grad_de = jac_dense(f, argnums=0, has_aux=has_aux)(X, y)
grad_sp = jac_sparse(f, argnums=0, has_aux=has_aux)(Xsp, y)
if has_aux:
grad_de, aux_de = grad_de
grad_sp, aux_sp = grad_sp
self.assertAllClose(aux_de, aux_sp)
self.assertIsInstance(grad_sp, sparse.BCOO)
self.assertAllClose(grad_sp.data, sparse_bcoo._bcoo_extract(grad_sp.indices, grad_de))
with self.subTest("wrt dense"):
rtol = 0.01 if jtu.test_device_matches(['tpu']) else None
self.assertAllClose(jac_dense(f, argnums=1, has_aux=has_aux)(X, y),
jac_sparse(f, argnums=1, has_aux=has_aux)(Xsp, y), rtol=rtol)
@jtu.sample_product(has_aux=[True, False],
deep=[True,False],
arg0=[True,False],
bias=[True,False])
def test_sparse_pytree_grad(self, has_aux, deep, arg0, bias):
rng_sparse = sptu.rand_sparse(self.rng())
rng = jtu.rand_default(self.rng())
y = rng(5, "float32")
X = rng_sparse((10, 5), "float32")
b = rng(10, "float32")
Xsp = sparse.BCOO.fromdense(X)
Xtree_sp = {'deep':{'X':Xsp},
'X':Xsp,
'list':[None,(b,None)]}
Xtree_de = {'deep':{'X':X},
'X':X,
'list':[None,(b,None)]}
def f(Xtree, y):
if deep:
out = Xtree['deep']['X'] @ y
else:
out = Xtree['X'] @ y
# Other grad variables
if bias:
out += Xtree['list'][1][0]
out = jnp.sum(out)
if has_aux:
return out, {'y': y.shape}
else:
return out
def g(y, Xtree):
if deep:
out = Xtree['deep']['X'] @ y
else:
out = Xtree['X'] @ y
# Other grad variables
if bias:
out += Xtree['list'][1][0]
out = jnp.sum(out)
if has_aux:
return out, {'y': y.shape}
return out
with self.subTest("wrt sparse"):
# Argument ordering
if arg0:
grad_de = jax.grad(f, argnums=0, has_aux=has_aux)(Xtree_de, y)
grad_sp = sparse.grad(f, argnums=0, has_aux=has_aux)(Xtree_sp, y)
else:
grad_de = jax.grad(g, argnums=1, has_aux=has_aux)(y, Xtree_de)
grad_sp = sparse.grad(g, argnums=1, has_aux=has_aux)(y, Xtree_sp)
if has_aux:
grad_de, aux_de = grad_de
grad_sp, aux_sp = grad_sp
self.assertAllClose(aux_de, aux_sp)
# Pytree structure
is_bcoo = lambda x: isinstance(x, sparse.bcoo.BCOO)
grad_densified = jax.tree_util.tree_map(sparse.todense, grad_sp,
is_leaf=is_bcoo)
self.assertEqual(jax.tree_util.tree_structure(grad_de),
jax.tree_util.tree_structure(grad_densified))
# Depth in tree
if deep:
grad_sp_arr = grad_sp['deep']['X']
grad_de_arr = grad_de['deep']['X']
else:
grad_sp_arr = grad_sp['X']
grad_de_arr = grad_de['X']
self.assertIsInstance(grad_sp_arr, sparse.BCOO)
self.assertAllClose(grad_sp_arr.data,
sparse_bcoo._bcoo_extract(grad_sp_arr.indices,
grad_de_arr))
# Other grad variables
if bias:
self.assertAllClose(grad_sp['list'][1][0],
grad_de['list'][1][0])
with self.subTest("wrt dense"):
# Argument ordering
if arg0:
self.assertAllClose(jax.grad(f, argnums=1, has_aux=has_aux)(Xtree_de, y),
sparse.grad(f, argnums=1, has_aux=has_aux)(Xtree_sp, y))
else:
self.assertAllClose(jax.grad(g, argnums=0, has_aux=has_aux)(y, Xtree_de),
sparse.grad(g, argnums=0, has_aux=has_aux)(y, Xtree_sp))
| SparseGradTest |
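The assertions above state that sparse.grad agrees with the dense gradient restricted to the stored indices of the BCOO operand. A small, self-contained sketch of that relationship, using only public jax.experimental.sparse APIs and toy data of my own:

import jax
import jax.numpy as jnp
from jax.experimental import sparse

X = jnp.array([[1., 0., 2.], [0., 3., 0.]])
Xsp = sparse.BCOO.fromdense(X)
y = jnp.array([1., 2., 3.])

def f(X, y):
    return jnp.sum(X @ y)

grad_dense = jax.grad(f, argnums=0)(X, y)        # full dense gradient, here jnp.outer(jnp.ones(2), y)
grad_sparse = sparse.grad(f, argnums=0)(Xsp, y)  # BCOO gradient with data only at the stored indices
# grad_sparse.todense() matches grad_dense at the stored positions and is zero elsewhere.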
python | pytorch__pytorch | torch/utils/show_pickle.py | {
"start": 225,
"end": 1497
} | class ____:
def __init__(self, module, name, args) -> None:
self.module = module
self.name = name
self.args = args
# NOTE: We don't distinguish between state never set and state set to None.
self.state = None
def __repr__(self) -> str:
state_str = "" if self.state is None else f"(state={self.state!r})"
return f"{self.module}.{self.name}{self.args!r}{state_str}"
def __setstate__(self, state):
self.state = state
@staticmethod
def pp_format(printer, obj, stream, indent, allowance, context, level) -> None:
if not obj.args and obj.state is None:
stream.write(repr(obj))
return
if obj.state is None:
stream.write(f"{obj.module}.{obj.name}")
printer._format(obj.args, stream, indent + 1, allowance + 1, context, level)
return
if not obj.args:
stream.write(f"{obj.module}.{obj.name}()(state=\n")
indent += printer._indent_per_level
stream.write(" " * indent)
printer._format(obj.state, stream, indent, allowance + 1, context, level + 1)
stream.write(")")
return
raise Exception("Need to implement") # noqa: TRY002
| FakeObject |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_10334.py | {
"start": 127,
"end": 176
} | class ____:
"""Class with a slice decorator."""
| A |
python | tensorflow__tensorflow | tensorflow/python/distribute/vars_test.py | {
"start": 49579,
"end": 54297
} | class ____(test.TestCase, parameterized.TestCase):
def testScatterSub(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_sub, args=(delta,)))
def testScatterAdd(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_add, args=(delta,)))
def testScatterDiv(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 6., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [3.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_div, args=(delta,)))
def testScatterMul(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [3.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[4.], [5.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_mul, args=(delta,)))
def testScatterMin(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
def testScatterMax(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_max, args=(delta,)))
def testScatterUpdate(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
if __name__ == "__main__":
test_util.main()
| SyncOnReadScatterReplicaTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-revert-word-to-initial-state-i.py | {
"start": 42,
"end": 899
} | class ____(object):
def minimumTimeToInitialState(self, word, k):
"""
:type word: str
:type k: int
:rtype: int
"""
def ceil_divide(a, b):
return (a+b-1)//b
def z_function(s): # Time: O(n), Space: O(n)
z = [0]*len(s)
l, r = 0, 0
for i in xrange(1, len(z)):
if i <= r:
z[i] = min(r-i+1, z[i-l])
while i+z[i] < len(z) and s[z[i]] == s[i+z[i]]:
z[i] += 1
if i+z[i]-1 > r:
l, r = i, i+z[i]-1
return z
z = z_function(word)
for i in xrange(k, len(word), k):
if z[i] == len(word)-i:
return i//k
return ceil_divide(len(word), k)
# Time: O(n^2)
# Space: O(1)
# brute force
| Solution |
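The solution builds on the Z-function: z[i] is the length of the longest common prefix of word and its suffix starting at i, so removing the first i characters (i // k steps of size k) restores the original word exactly when z[i] == len(word) - i. A Python 3 sketch of the same z_function for a quick sanity check:

def z_function(s):
    z = [0] * len(s)
    l = r = 0
    for i in range(1, len(s)):
        if i <= r:
            z[i] = min(r - i + 1, z[i - l])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > r:
            l, r = i, i + z[i] - 1
    return z

print(z_function("abacaba"))  # [0, 0, 1, 0, 3, 0, 1]
# With word="abacaba" and k=3, the loop checks i=3 (z[3]=0 != 4) and i=6 (z[6]=1 == 1),
# so minimumTimeToInitialState returns 6 // 3 = 2.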
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/files.py | {
"start": 24157,
"end": 24805
} | class ____:
def __init__(self, files: Files) -> None:
self._files = files
self.list = _legacy_response.to_raw_response_wrapper(
files.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
files.delete,
)
self.download = to_custom_raw_response_wrapper(
files.download,
BinaryAPIResponse,
)
self.retrieve_metadata = _legacy_response.to_raw_response_wrapper(
files.retrieve_metadata,
)
self.upload = _legacy_response.to_raw_response_wrapper(
files.upload,
)
| FilesWithRawResponse |
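A hedged usage sketch of the wrapper above (assumes the SDK's standard raw-response accessors and a configured API key; anything not defined in this file is an assumption):

from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
raw = client.beta.files.with_raw_response.list()  # the wrapper built by FilesWithRawResponse
print(raw.headers)    # raw HTTP details are exposed on the response...
page = raw.parse()    # ...while .parse() still returns the typed list result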
python | kamyu104__LeetCode-Solutions | Python/put-marbles-in-bags.py | {
"start": 79,
"end": 1694
} | class ____(object):
def putMarbles(self, weights, k):
"""
:type weights: List[int]
:type k: int
:rtype: int
"""
def nth_element(nums, n, left=0, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
right = len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
for i in xrange(len(weights)-1):
weights[i] += weights[i+1]
weights.pop()
result = 0
nth_element(weights, (k-1)-1, compare=lambda a, b: a > b)
result += sum(weights[i] for i in xrange(k-1))
nth_element(weights, (k-1)-1)
result -= sum(weights[i] for i in xrange(k-1))
return result
| Solution |
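The score difference depends only on the k-1 chosen bag boundaries: after replacing weights[i] with weights[i] + weights[i+1], the answer is the sum of the k-1 largest boundary costs minus the sum of the k-1 smallest, which the quickselect-style nth_element finds in average O(n) instead of sorting. A quick full-sort check of the same quantity (toy values of my own, k > 1 assumed):

weights = [1, 3, 5, 1]
k = 2
pair_costs = [weights[i] + weights[i + 1] for i in range(len(weights) - 1)]  # [4, 8, 6]
pair_costs.sort()
answer = sum(pair_costs[-(k - 1):]) - sum(pair_costs[:k - 1])  # 8 - 4
print(answer)  # 4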
python | Farama-Foundation__Gymnasium | gymnasium/core.py | {
"start": 27979,
"end": 29657
} | class ____(Wrapper[ObsType, WrapperActType, ObsType, ActType]):
"""Superclass of wrappers that can modify the action before :meth:`step`.
If you would like to apply a function to the action before passing it to the base environment,
you can simply inherit from :class:`ActionWrapper` and overwrite the method :meth:`action` to implement
that transformation. The transformation defined in that method must take values in the base environment's
action space. However, its domain might differ from the original action space.
In that case, you need to specify the new action space of the wrapper by setting :attr:`action_space` in
the :meth:`__init__` method of your wrapper.
Among others, Gymnasium provides the action wrappers :class:`gymnasium.wrappers.ClipAction` and
:class:`gymnasium.wrappers.RescaleAction` for clipping and rescaling actions.
"""
def __init__(self, env: Env[ObsType, ActType]):
"""Constructor for the action wrapper.
Args:
env: Environment to be wrapped.
"""
Wrapper.__init__(self, env)
def step(
self, action: WrapperActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Runs the :attr:`env` :meth:`env.step` using the modified ``action`` from :meth:`self.action`."""
return self.env.step(self.action(action))
def action(self, action: WrapperActType) -> ActType:
"""Returns a modified action before :meth:`step` is called.
Args:
action: The original :meth:`step` actions
Returns:
The modified actions
"""
raise NotImplementedError
| ActionWrapper |
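Following the docstring, a minimal sketch of a concrete subclass that clips continuous actions into the wrapped environment's Box bounds before step() (Gymnasium ships an equivalent built-in, gymnasium.wrappers.ClipAction; the environment id below is just an example):

import numpy as np
import gymnasium as gym

class ClipActionSketch(gym.ActionWrapper):
    """Clip actions to the wrapped environment's Box bounds before step()."""
    def action(self, action):
        return np.clip(action, self.env.action_space.low, self.env.action_space.high)

env = ClipActionSketch(gym.make("Pendulum-v1"))
obs, info = env.reset(seed=0)
obs, reward, terminated, truncated, info = env.step(np.array([10.0]))  # clipped to the valid range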
python | viewflow__viewflow | viewflow/workflow/flow/views/detail.py | {
"start": 3222,
"end": 3697
} | class ____(DetailTaskView):
def get_template_names(self):
flow_task = self.activation.flow_task
opts = self.activation.flow_task.flow_class.instance
return (
"{}/{}/{}_detail.html".format(
opts.app_label, opts.flow_label, flow_task.name
),
"{}/{}/subprocess_detail.html".format(opts.app_label, opts.flow_label),
"viewflow/workflow/subprocess_detail.html",
)
| DetailSubprocessView |
python | scipy__scipy | scipy/interpolate/_bsplines.py | {
"start": 2227,
"end": 90686
} | class ____:
r"""Univariate spline in the B-spline basis.
.. math::
S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
and knots `t`.
Parameters
----------
t : ndarray, shape (n+k+1,)
knots
c : ndarray, shape (>=n, ...)
spline coefficients
k : int
B-spline degree
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
or to return nans.
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
If 'periodic', periodic extrapolation is used.
Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
t : ndarray
knot vector
c : ndarray
spline coefficients
k : int
spline degree
extrapolate : bool
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
axis : int
Interpolation axis.
tck : tuple
A read-only equivalent of ``(self.t, self.c, self.k)``
Methods
-------
__call__
basis_element
derivative
antiderivative
integrate
insert_knot
construct_fast
design_matrix
from_power_basis
Notes
-----
B-spline basis elements are defined via
.. math::
B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
**Implementation details**
- At least ``k+1`` coefficients are required for a spline of degree `k`,
so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
``j > n``, are ignored.
- B-spline basis elements of degree `k` form a partition of unity on the
*base interval*, ``t[k] <= x <= t[n]``.
Examples
--------
Translating the recursive definition of B-splines into Python code, we have:
>>> def B(x, k, i, t):
... if k == 0:
... return 1.0 if t[i] <= x < t[i+1] else 0.0
... if t[i+k] == t[i]:
... c1 = 0.0
... else:
... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
... if t[i+k+1] == t[i+1]:
... c2 = 0.0
... else:
... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
... return c1 + c2
>>> def bspline(x, t, c, k):
... n = len(t) - k - 1
... assert (n >= k+1) and (len(c) >= n)
... return sum(c[i] * B(x, k, i, t) for i in range(n))
Note that this is an inefficient (if straightforward) way to
evaluate B-splines --- this spline class does it in an equivalent,
but much more efficient way.
Here we construct a quadratic spline function on the base interval
``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
>>> from scipy.interpolate import BSpline
>>> k = 2
>>> t = [0, 1, 2, 3, 4, 5, 6]
>>> c = [-1, 2, 0, -1]
>>> spl = BSpline(t, c, k)
>>> spl(2.5)
array(1.375)
>>> bspline(2.5, t, c, k)
1.375
Note that outside of the base interval results differ. This is because
`BSpline` extrapolates the first and last polynomial pieces of B-spline
functions active on the base interval.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> fig, ax = plt.subplots()
>>> xx = np.linspace(1.5, 4.5, 50)
>>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
>>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
>>> ax.grid(True)
>>> ax.legend(loc='best')
>>> plt.show()
References
----------
.. [1] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
.. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
"""
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def __init__(self, t, c, k, extrapolate=True, axis=0):
super().__init__()
self._asarray = array_namespace(c, t).asarray
self.k = operator.index(k)
self._c = np.asarray(c)
self._t = np.ascontiguousarray(t, dtype=np.float64)
if extrapolate == 'periodic':
self.extrapolate = extrapolate
else:
self.extrapolate = bool(extrapolate)
n = self._t.shape[0] - self.k - 1
axis = normalize_axis_index(axis, self._c.ndim)
# Note that the normalized axis is stored in the object.
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (n, ...),
# and axis !=0 means that we have c.shape (..., n, ...)
# ^
# axis
self._c = np.moveaxis(self._c, axis, 0)
if k < 0:
raise ValueError("Spline order cannot be negative.")
if self._t.ndim != 1:
raise ValueError("Knot vector must be one-dimensional.")
if n < self.k + 1:
raise ValueError(f"Need at least {2*k + 2} knots for degree {k}")
if (np.diff(self._t) < 0).any():
raise ValueError("Knots must be in a non-decreasing order.")
if len(np.unique(self._t[k:n+1])) < 2:
raise ValueError("Need at least two internal knots.")
if not np.isfinite(self._t).all():
raise ValueError("Knots should not have nans or infs.")
if self._c.ndim < 1:
raise ValueError("Coefficients must be at least 1-dimensional.")
if self._c.shape[0] < n:
raise ValueError("Knots, coefficients and degree are inconsistent.")
dt = _get_dtype(self._c.dtype)
self._c = np.ascontiguousarray(self._c, dtype=dt)
@classmethod
def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
"""Construct a spline without making checks.
Accepts same parameters as the regular constructor. Input arrays
`t` and `c` must of correct shape and dtype.
"""
self = object.__new__(cls)
self._t, self._c, self.k = np.asarray(t), np.asarray(c), k
self.extrapolate = extrapolate
self.axis = axis
self._asarray = array_namespace(t, c).asarray
return self
@property
def tck(self):
"""Equivalent to ``(self.t, self.c, self.k)`` (read-only).
"""
return self.t, self.c, self.k
# Under the hood, self._c and self._t are always saved as numpy array
# because they are used in a C extension expecting numpy arrays.
@property
def t(self):
return self._asarray(self._t)
@t.setter
def t(self, t):
self._t = np.asarray(t)
@property
def c(self):
return self._asarray(self._c)
@c.setter
def c(self, c):
self._c = np.asarray(c)
@classmethod
def basis_element(cls, t, extrapolate=True):
"""Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
Parameters
----------
t : ndarray, shape (k+2,)
internal knots
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
or to return nans.
If 'periodic', periodic extrapolation is used.
Default is True.
Returns
-------
basis_element : callable
A callable representing a B-spline basis element for the knot
vector `t`.
Notes
-----
The degree of the B-spline, `k`, is inferred from the length of `t` as
``len(t)-2``. The knot vector is constructed by appending and prepending
``k`` elements to the internal knots `t`.
Examples
--------
Construct a cubic B-spline:
>>> import numpy as np
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2, 3, 4])
>>> k = b.k
>>> b.t[k:-k]
array([ 0., 1., 2., 3., 4.])
>>> k
3
Construct a quadratic B-spline on ``[0, 1, 1, 2]``, and compare
to its explicit form:
>>> t = [0, 1, 1, 2]
>>> b = BSpline.basis_element(t)
>>> def f(x):
... return np.where(x < 1, x*x, (2. - x)**2)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0, 2, 51)
>>> ax.plot(x, b(x), 'g', lw=3)
>>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
>>> ax.grid(True)
>>> plt.show()
"""
xp = array_namespace(t)
t = np.asarray(t)
k = t.shape[0] - 2
t = _as_float_array(t) # TODO: use concat_1d instead of np.r_
t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
c = np.zeros_like(t)
c[k] = 1.
t, c = xp.asarray(t), xp.asarray(c)
return cls.construct_fast(t, c, k, extrapolate)
@classmethod
def design_matrix(cls, x, t, k, extrapolate=False):
"""
Returns a design matrix as a CSR format sparse array.
Parameters
----------
x : array_like, shape (n,)
Points to evaluate the spline at.
t : array_like, shape (nt,)
Sorted 1D array of knots.
k : int
B-spline degree.
extrapolate : bool or 'periodic', optional
Whether to extrapolate based on the first and last intervals
or raise an error. If 'periodic', periodic extrapolation is used.
Default is False.
.. versionadded:: 1.10.0
Returns
-------
design_matrix : `csr_array` object
Sparse matrix in CSR format where each row contains all the basis
elements of the input row (first row = basis elements of x[0],
..., last row = basis elements of x[-1]).
Examples
--------
Construct a design matrix for a B-spline
>>> from scipy.interpolate import make_interp_spline, BSpline
>>> import numpy as np
>>> x = np.linspace(0, np.pi * 2, 4)
>>> y = np.sin(x)
>>> k = 3
>>> bspl = make_interp_spline(x, y, k=k)
>>> design_matrix = bspl.design_matrix(x, bspl.t, k)
>>> design_matrix.toarray()
[[1. , 0. , 0. , 0. ],
[0.2962963 , 0.44444444, 0.22222222, 0.03703704],
[0.03703704, 0.22222222, 0.44444444, 0.2962963 ],
[0. , 0. , 0. , 1. ]]
Construct a design matrix for some vector of knots
>>> k = 2
>>> t = [-1, 0, 1, 2, 3, 4, 5, 6]
>>> x = [1, 2, 3, 4]
>>> design_matrix = BSpline.design_matrix(x, t, k).toarray()
>>> design_matrix
[[0.5, 0.5, 0. , 0. , 0. ],
[0. , 0.5, 0.5, 0. , 0. ],
[0. , 0. , 0.5, 0.5, 0. ],
[0. , 0. , 0. , 0.5, 0.5]]
This result is equivalent to the one created in the sparse format
>>> c = np.eye(len(t) - k - 1)
>>> design_matrix_gh = BSpline(t, c, k)(x)
>>> np.allclose(design_matrix, design_matrix_gh, atol=1e-14)
True
Notes
-----
.. versionadded:: 1.8.0
In each row of the design matrix all the basis elements are evaluated
at the certain point (first row - x[0], ..., last row - x[-1]).
`nt` is the length of the vector of knots: since there are
`nt - k - 1` basis elements, `nt` should be at least `2 * k + 2`
to have at least `k + 1` basis elements.
Out of bounds `x` raises a ValueError.
"""
x = _as_float_array(x, True)
t = _as_float_array(t, True)
if extrapolate != 'periodic':
extrapolate = bool(extrapolate)
if k < 0:
raise ValueError("Spline order cannot be negative.")
if t.ndim != 1 or np.any(t[1:] < t[:-1]):
raise ValueError(f"Expect t to be a 1-D sorted array_like, but "
f"got t={t}.")
# There are `nt - k - 1` basis elements in a BSpline built on the
# vector of knots with length `nt`, so to have at least `k + 1` basis
# elements we need to have at least `2 * k + 2` elements in the vector
# of knots.
if len(t) < 2 * k + 2:
raise ValueError(f"Length t is not enough for k={k}.")
if extrapolate == 'periodic':
# With periodic extrapolation we map x to the segment
# [t[k], t[n]].
n = t.size - k - 1
x = t[k] + (x - t[k]) % (t[n] - t[k])
extrapolate = False
elif not extrapolate and (
(min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1])
):
# Checks from `find_interval` function
raise ValueError(f'Out of bounds w/ x = {x}.')
# Compute number of non-zeros of final CSR array in order to determine
# the dtype of indices and indptr of the CSR array.
n = x.shape[0]
nnz = n * (k + 1)
if nnz < np.iinfo(np.int32).max:
int_dtype = np.int32
else:
int_dtype = np.int64
# Get the non-zero elements of the design matrix and per-row `offsets`:
# In row `i`, k+1 nonzero elements are consecutive, and start from `offset[i]`
data, offsets, _ = _dierckx.data_matrix(x, t, k, np.ones_like(x), extrapolate)
data = data.ravel()
if offsets.dtype != int_dtype:
offsets = offsets.astype(int_dtype)
# Convert from per-row offsets to the CSR indices/indptr format
indices = np.repeat(offsets, k+1).reshape(-1, k+1)
indices = indices + np.arange(k+1, dtype=int_dtype)
indices = indices.ravel()
indptr = np.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype)
return csr_array(
(data, indices, indptr),
shape=(x.shape[0], t.shape[0] - k - 1)
)
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate a spline function.
Parameters
----------
x : array_like
points to evaluate the spline at.
nu : int, optional
derivative to evaluate (default is 0).
extrapolate : bool or 'periodic', optional
whether to extrapolate based on the first and last intervals
or return nans. If 'periodic', periodic extrapolation is used.
Default is `self.extrapolate`.
Returns
-------
y : array_like
Shape is determined by replacing the interpolation axis
in the coefficient array with the shape of `x`.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float64)
# With periodic extrapolation we map x to the segment
# [self.t[k], self.t[n]].
if extrapolate == 'periodic':
n = self._t.size - self.k - 1
x = self._t[self.k] + (x - self._t[self.k]) % (self._t[n] - self._t[self.k])
extrapolate = False
self._ensure_c_contiguous()
# if self.c is complex: the C code in _dierckxmodule.cc expects
# floats, so make a view---this expands the last axis, and
# the view is C contiguous if the original is.
# if c.dtype is complex of shape (n,), c.view(float).shape == (2*n,)
# if c.dtype is complex of shape (n, m), c.view(float).shape == (n, 2*m)
is_complex = self._c.dtype.kind == 'c'
if is_complex:
cc = self._c.view(float)
if self._c.ndim == 1:
cc = cc.reshape(self._c.shape[0], 2)
else:
cc = self._c
# flatten the trailing dims
cc = cc.reshape(cc.shape[0], -1)
# heavy lifting: actually perform the evaluations
out = _dierckx.evaluate_spline(self._t, cc, self.k, x, nu, extrapolate)
if is_complex:
out = out.view(complex)
out = out.reshape(x_shape + self._c.shape[1:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return self._asarray(out)
def _ensure_c_contiguous(self):
"""
c and t may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self._t.flags.c_contiguous:
self._t = self._t.copy()
if not self._c.flags.c_contiguous:
self._c = self._c.copy()
def derivative(self, nu=1):
"""Return a B-spline representing the derivative.
Parameters
----------
nu : int, optional
Derivative order.
Default is 1.
Returns
-------
b : `BSpline` object
A new instance representing the derivative.
See Also
--------
splder, splantider
"""
c = self._asarray(self.c, copy=True)
t = self.t
xp = array_namespace(t, c)
# pad the c array if needed
ct = t.shape[0] - c.shape[0]
if ct > 0:
c = concat_1d(xp, c, xp.zeros((ct,) + c.shape[1:]))
tck = _fitpack_impl.splder((t, c, self.k), nu)
return self.construct_fast(*tck, extrapolate=self.extrapolate,
axis=self.axis)
def antiderivative(self, nu=1):
"""Return a B-spline representing the antiderivative.
Parameters
----------
nu : int, optional
Antiderivative order. Default is 1.
Returns
-------
b : `BSpline` object
A new instance representing the antiderivative.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
See Also
--------
splder, splantider
"""
c = self._asarray(self.c, copy=True)
t = self.t
xp = array_namespace(t, c)
# pad the c array if needed
ct = t.shape[0] - c.shape[0]
if ct > 0:
c = concat_1d(xp, c, xp.zeros((ct,) + c.shape[1:]))
tck = _fitpack_impl.splantider((t, c, self.k), nu)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(*tck, extrapolate=extrapolate,
axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""Compute a definite integral of the spline.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval,
``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
base interval. If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
I : array_like
Definite integral of the spline over the interval ``[a, b]``.
Examples
--------
Construct the linear spline ``x if x < 1 else 2 - x`` on the base
interval :math:`[0, 2]`, and integrate it
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2])
>>> b.integrate(0, 1)
array(0.5)
If the integration limits are outside of the base interval, the result
is controlled by the `extrapolate` parameter
>>> b.integrate(-1, 1)
array(0.0)
>>> b.integrate(-1, 1, extrapolate=False)
array(0.5)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.grid(True)
>>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval
>>> ax.axvline(2, c='r', lw=5, alpha=0.5)
>>> xx = [-1, 1, 2]
>>> ax.plot(xx, b(xx))
>>> plt.show()
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Prepare self.t and self.c.
self._ensure_c_contiguous()
# Swap integration bounds if needed.
sign = 1
if b < a:
a, b = b, a
sign = -1
n = self._t.size - self.k - 1
if extrapolate != "periodic" and not extrapolate:
# Shrink the integration interval, if needed.
a = max(a, self._t[self.k])
b = min(b, self._t[n])
if self._c.ndim == 1:
# Fast path: use FITPACK's routine
# (cf _fitpack_impl.splint).
integral = _fitpack_impl.splint(a, b, (self._t, self._c, self.k))
return self._asarray(integral * sign)
# Compute the antiderivative.
c = self._c
ct = len(self._t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
ta, ca, ka = _fitpack_impl.splantider((self._t, c, self.k), 1)
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
ts, te = self._t[self.k], self._t[n]
period = te - ts
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
# Evaluate the difference of antiderivatives.
x = np.asarray([ts, te], dtype=np.float64)
out = _dierckx.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False)
integral = out[1] - out[0]
integral *= n_periods
else:
integral = np.zeros((1, prod(self._c.shape[1:])),
dtype=self._c.dtype)
# Map a to [ts, te], b is always a + left.
a = ts + (a - ts) % period
b = a + left
# If b <= te then we need to integrate over [a, b], otherwise
# over [a, te] and from xs to what is remained.
if b <= te:
x = np.asarray([a, b], dtype=np.float64)
out = _dierckx.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False)
integral += out[1] - out[0]
else:
x = np.asarray([a, te], dtype=np.float64)
out = _dierckx.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False)
integral += out[1] - out[0]
x = np.asarray([ts, ts + b - te], dtype=np.float64)
out = _dierckx.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False)
integral += out[1] - out[0]
else:
# Evaluate the difference of antiderivatives.
x = np.asarray([a, b], dtype=np.float64)
out = _dierckx.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, extrapolate)
integral = out[1] - out[0]
integral *= sign
return self._asarray(integral.reshape(ca.shape[1:]))
@classmethod
def from_power_basis(cls, pp, bc_type='not-a-knot'):
r"""
Construct a polynomial in the B-spline basis
from a piecewise polynomial in the power basis.
For now, accepts ``CubicSpline`` instances only.
Parameters
----------
pp : CubicSpline
A piecewise polynomial in the power basis, as created
by ``CubicSpline``
bc_type : string, optional
Boundary condition type as in ``CubicSpline``: one of the
``not-a-knot``, ``natural``, ``clamped``, or ``periodic``.
Necessary for construction an instance of ``BSpline`` class.
Default is ``not-a-knot``.
Returns
-------
b : `BSpline` object
A new instance representing the initial polynomial
in the B-spline basis.
Notes
-----
.. versionadded:: 1.8.0
Accepts only ``CubicSpline`` instances for now.
The algorithm follows from differentiating
Marsden's identity [1]: each coefficient of the spline
interpolation function in the B-spline basis is computed as follows:
.. math::
c_j = \sum_{m=0}^{k} \frac{(k-m)!}{k!}
c_{m,i} (-1)^{k-m} D^m p_{j,k}(x_i)
:math:`c_{m, i}` - a coefficient of CubicSpline,
:math:`D^m p_{j, k}(x_i)` - an m-th derivative of a dual polynomial
in :math:`x_i`.
``k`` always equals 3 for now.
First ``n - 2`` coefficients are computed at :math:`x_i = x_j`, e.g.
.. math::
c_1 = \sum_{m=0}^{k} \frac{(k-1)!}{k!} c_{m,1} D^m p_{j,3}(x_1)
Last ``nod + 2`` coefficients are computed at ``x[-2]``,
``nod`` - number of derivatives at the ends.
For example, consider :math:`x = [0, 1, 2, 3, 4]`,
:math:`y = [1, 1, 1, 1, 1]` and bc_type = ``natural``
The coefficients of CubicSpline in the power basis:
:math:`[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]`
The knot vector: :math:`t = [0, 0, 0, 0, 1, 2, 3, 4, 4, 4, 4]`
In this case
.. math::
c_j = \frac{0!}{k!} c_{3, i} k! = c_{3, i} = 1,~j = 0, ..., 6
References
----------
.. [1] Tom Lyche and Knut Morken, Spline Methods, 2005, Section 3.1.2
"""
from ._cubic import CubicSpline
if not isinstance(pp, CubicSpline):
raise NotImplementedError(f"Only CubicSpline objects are accepted "
f"for now. Got {type(pp)} instead.")
x = pp.x
coef = pp.c
k = pp.c.shape[0] - 1
n = x.shape[0]
if bc_type == 'not-a-knot':
t = _not_a_knot(x, k)
elif bc_type == 'natural' or bc_type == 'clamped':
t = _augknt(x, k)
elif bc_type == 'periodic':
t = _periodic_knots(x, k)
else:
raise TypeError(f'Unknown boundary condition: {bc_type}')
nod = t.shape[0] - (n + k + 1) # number of derivatives at the ends
c = np.zeros(n + nod, dtype=pp.c.dtype)
for m in range(k + 1):
for i in range(n - 2):
c[i] += poch(k + 1, -m) * coef[m, i]\
* np.power(-1, k - m)\
* _diff_dual_poly(i, k, x[i], m, t)
for j in range(n - 2, n + nod):
c[j] += poch(k + 1, -m) * coef[m, n - 2]\
* np.power(-1, k - m)\
* _diff_dual_poly(j, k, x[n - 2], m, t)
return cls.construct_fast(t, c, k, pp.extrapolate, pp.axis)
def insert_knot(self, x, m=1):
"""Insert a new knot at `x` of multiplicity `m`.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
Parameters
----------
x : float
The position of the new knot
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
Returns
-------
spl : `BSpline` object
A new `BSpline` object with the new knot inserted.
Notes
-----
Based on algorithms from [1]_ and [2]_.
In case of a periodic spline (``self.extrapolate == "periodic"``)
there must be either at least k interior knots t(j) satisfying
``t(k+1)<t(j)<=x`` or at least k interior knots t(j) satisfying
``x<=t(j)<t(n-k)``.
This routine is functionally equivalent to `scipy.interpolate.insert`.
.. versionadded:: 1.13
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
:doi:`10.1016/0010-4485(80)90154-2`.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
See Also
--------
scipy.interpolate.insert
Examples
--------
You can insert knots into a B-spline:
>>> import numpy as np
>>> from scipy.interpolate import BSpline, make_interp_spline
>>> x = np.linspace(0, 10, 5)
>>> y = np.sin(x)
>>> spl = make_interp_spline(x, y, k=3)
>>> spl.t
array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
Insert a single knot
>>> spl_1 = spl.insert_knot(3)
>>> spl_1.t
array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
Insert a multiple knot
>>> spl_2 = spl.insert_knot(8, m=3)
>>> spl_2.t
array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
"""
x = float(x)
if x < self._t[self.k] or x > self._t[-self.k-1]:
raise ValueError(f"Cannot insert a knot at {x}.")
if m <= 0:
raise ValueError(f"`m` must be positive, got {m = }.")
tt = self._t.copy()
cc = self._c.copy()
for _ in range(m):
tt, cc = _insert(x, tt, cc, self.k, self.extrapolate == "periodic")
tt, cc = self._asarray(tt), self._asarray(cc)
return self.construct_fast(tt, cc, self.k, self.extrapolate, self.axis)
def _insert(xval, t, c, k, periodic=False):
"""Insert a single knot at `xval`."""
#
# This is a port of the FORTRAN `insert` routine by P. Dierckx,
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/insert.f
# which carries the following comment:
#
# subroutine insert inserts a new knot x into a spline function s(x)
# of degree k and calculates the b-spline representation of s(x) with
# respect to the new set of knots. in addition, if iopt.ne.0, s(x)
# will be considered as a periodic spline with period per=t(n-k)-t(k+1)
# satisfying the boundary constraints
# t(i+n-2*k-1) = t(i)+per ,i=1,2,...,2*k+1
# c(i+n-2*k-1) = c(i) ,i=1,2,...,k
# in that case, the knots and b-spline coefficients returned will also
# satisfy these boundary constraints, i.e.
# tt(i+nn-2*k-1) = tt(i)+per ,i=1,2,...,2*k+1
# cc(i+nn-2*k-1) = cc(i) ,i=1,2,...,k
interval = _dierckx.find_interval(t, k, float(xval), k, False)
if interval < 0:
# extrapolated values are guarded for in BSpline.insert_knot
raise ValueError(f"Cannot insert the knot at {xval}.")
# super edge case: a knot with multiplicity > k+1
# see https://github.com/scipy/scipy/commit/037204c3e91
if t[interval] == t[interval + k + 1]:
interval -= 1
if periodic:
if (interval + 1 <= 2*k) and (interval + 1 >= t.shape[0] - 2*k):
# in case of a periodic spline (iopt.ne.0) there must be
# either at least k interior knots t(j) satisfying t(k+1)<t(j)<=x
# or at least k interior knots t(j) satisfying x<=t(j)<t(n-k)
raise ValueError("Not enough internal knots.")
# knots
tt = np.r_[t[:interval+1], xval, t[interval+1:]]
newshape = (c.shape[0] + 1,) + c.shape[1:]
cc = np.zeros(newshape, dtype=c.dtype)
# coefficients
cc[interval+1:, ...] = c[interval:, ...]
for i in range(interval, interval-k, -1):
fac = (xval - tt[i]) / (tt[i+k+1] - tt[i])
cc[i, ...] = fac*c[i, ...] + (1. - fac)*c[i-1, ...]
cc[:interval - k+1, ...] = c[:interval - k+1, ...]
if periodic:
# c incorporate the boundary conditions for a periodic spline.
n = tt.shape[0]
nk = n - k - 1
n2k = n - 2*k - 1
T = tt[nk] - tt[k] # period
if interval >= nk - k:
# adjust the left-hand boundary knots & coefs
tt[:k] = tt[nk - k:nk] - T
cc[:k, ...] = cc[n2k:n2k + k, ...]
if interval <= 2*k-1:
# adjust the right-hand boundary knots & coefs
tt[n-k:] = tt[k+1:k+1+k] + T
cc[n2k:n2k + k, ...] = cc[:k, ...]
return tt, cc
#################################
# Interpolating spline helpers #
#################################
def _not_a_knot(x, k):
"""Given data x, construct the knot vector w/ not-a-knot BC.
cf de Boor, XIII(12).
For even k, it's a bit ad hoc: Greville sites + omit 2nd and 2nd-to-last
data points, a la not-a-knot.
This seems to match what Dierckx does, too:
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L63-L80
"""
x = np.asarray(x)
if k % 2 == 1:
k2 = (k + 1) // 2
t = x.copy()
else:
k2 = k // 2
t = (x[1:] + x[:-1]) / 2
t = t[k2:-k2]
t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
return t
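# A quick illustration of the construction above (values chosen for illustration):
# for x = [0, 1, 2, 3, 4] and k = 3 (odd), only the middle site x[2] is kept as an
# interior knot, giving t = [0, 0, 0, 0, 2, 4, 4, 4, 4]; the 2nd and 2nd-to-last
# data sites carry no knot, which is exactly the not-a-knot condition.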
def _augknt(x, k):
"""Construct a knot vector appropriate for the order-k interpolation."""
return np.r_[(x[0],)*k, x, (x[-1],)*k]
def _convert_string_aliases(deriv, target_shape):
if isinstance(deriv, str):
if deriv == "clamped":
deriv = [(1, np.zeros(target_shape))]
elif deriv == "natural":
deriv = [(2, np.zeros(target_shape))]
else:
raise ValueError(f"Unknown boundary condition : {deriv}")
return deriv
def _process_deriv_spec(deriv):
if deriv is not None:
try:
ords, vals = zip(*deriv)
except TypeError as e:
msg = ("Derivatives, `bc_type`, should be specified as a pair of "
"iterables of pairs of (order, value).")
raise ValueError(msg) from e
else:
ords, vals = [], []
return np.atleast_1d(ords, vals)
def _woodbury_algorithm(A, ur, ll, b, k):
'''
Solve a cyclic banded linear system with upper right
and lower left blocks of size ``(k-1) / 2`` using
the Woodbury formula
Parameters
----------
A : 2-D array, shape(k, n)
Matrix of diagonals of original matrix (see
``solve_banded`` documentation).
ur : 2-D array, shape(bs, bs)
Upper right block matrix.
ll : 2-D array, shape(bs, bs)
Lower left block matrix.
b : 1-D array, shape(n,)
Vector of constant terms of the system of linear equations.
k : int
B-spline degree.
Returns
-------
c : 1-D array, shape(n,)
Solution of the original system of linear equations.
Notes
-----
This algorithm works only for systems with banded matrix A plus
a correction term U @ V.T, where the matrix U @ V.T gives upper right
and lower left block of A
The system is solved with the following steps:
1. New systems of linear equations are constructed:
A @ z_i = u_i,
u_i - column vector of U,
i = 1, ..., k - 1
2. Matrix Z is formed from vectors z_i:
Z = [ z_1 | z_2 | ... | z_{k - 1} ]
3. Matrix H = (1 + V.T @ Z)^{-1}
4. The system A' @ y = b is solved
5. x = y - Z @ (H @ V.T @ y)
Also, ``n`` should be greater than ``k``, otherwise corner block
elements will intersect with diagonals.
Examples
--------
Consider the case of n = 8, k = 5 (size of blocks - 2 x 2).
The matrix of a system: U: V:
x x x * * a b a b 0 0 0 0 1 0
x x x x * * c 0 c 0 0 0 0 0 1
x x x x x * * 0 0 0 0 0 0 0 0
* x x x x x * 0 0 0 0 0 0 0 0
* * x x x x x 0 0 0 0 0 0 0 0
d * * x x x x 0 0 d 0 1 0 0 0
e f * * x x x 0 0 e f 0 1 0 0
References
----------
.. [1] William H. Press, Saul A. Teukolsky, William T. Vetterling
and Brian P. Flannery, Numerical Recipes, 2007, Section 2.7.3
'''
k_mod = k - k % 2
bs = int((k - 1) / 2) + (k + 1) % 2
n = A.shape[1] + 1
U = np.zeros((n - 1, k_mod))
VT = np.zeros((k_mod, n - 1)) # V transpose
# upper right block
U[:bs, :bs] = ur
VT[np.arange(bs), np.arange(bs) - bs] = 1
# lower left block
U[-bs:, -bs:] = ll
VT[np.arange(bs) - bs, np.arange(bs)] = 1
Z = solve_banded((bs, bs), A, U)
H = solve(np.identity(k_mod) + VT @ Z, np.identity(k_mod))
y = solve_banded((bs, bs), A, b)
c = y - Z @ (H @ (VT @ y))
return c
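# Conceptual check of the Woodbury update used above, written with dense
# matrices for clarity (illustration only; the helper itself works on the
# banded storage of ``A`` and on the corner blocks ``ur``/``ll``).
# >>> import numpy as np
# >>> A = 2.0 * np.eye(6) + 0.3 * (np.eye(6, k=1) + np.eye(6, k=-1))
# >>> U = np.zeros((6, 2)); V = np.zeros((6, 2))
# >>> U[0, 0] = U[5, 1] = 1.0
# >>> V[5, 0] = V[0, 1] = 1.0      # A + U @ V.T gains two corner entries
# >>> b = np.arange(6.0)
# >>> Z = np.linalg.solve(A, U)
# >>> H = np.linalg.inv(np.eye(2) + V.T @ Z)
# >>> y = np.linalg.solve(A, b)
# >>> np.allclose(y - Z @ (H @ (V.T @ y)), np.linalg.solve(A + U @ V.T, b))
# True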
def _periodic_knots(x, k):
'''
Returns a vector of knots taken on a circle.
'''
xc = np.copy(x)
n = len(xc)
if k % 2 == 0:
dx = np.diff(xc)
xc[1: -1] -= dx[:-1] / 2
dx = np.diff(xc)
t = np.zeros(n + 2 * k)
t[k: -k] = xc
for i in range(0, k):
# filling first `k` elements in descending order
t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]
# filling last `k` elements in ascending order
t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]
return t
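# Illustrative sketch (assumes k = 3 and equispaced data): the knot vector is
# continued periodically on both sides of the data interval.
# >>> import numpy as np
# >>> _periodic_knots(np.arange(4.0), 3).tolist()
# [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]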
def _make_interp_per_full_matr(x, y, t, k):
'''
Returns a solution of a system for B-spline interpolation with periodic
boundary conditions. First ``k - 1`` rows of matrix are conditions of
periodicity (continuity of ``k - 1`` derivatives at the boundary points).
Last ``n`` rows are interpolation conditions.
RHS is ``k - 1`` zeros and ``n`` ordinates in this case.
Parameters
----------
x : 1-D array, shape (n,)
Values of x - coordinate of a given set of points.
y : 1-D array, shape (n,)
Values of y - coordinate of a given set of points.
t : 1-D array, shape(n+2*k,)
Vector of knots.
k : int
B-spline degree.
Returns
-------
c : 1-D array, shape (n+k-1,)
B-spline coefficients
Notes
-----
``t`` is supposed to be taken on circle.
'''
x, y, t = map(np.asarray, (x, y, t))
n = x.size
# LHS: the colocation matrix + derivatives at edges
matr = np.zeros((n + k - 1, n + k - 1))
# derivatives at x[0] and x[-1]:
for i in range(k - 1):
bb = _dierckx.evaluate_all_bspl(t, k, x[0], k, i + 1)
matr[i, : k + 1] += bb
bb = _dierckx.evaluate_all_bspl(t, k, x[-1], n + k - 1, i + 1)[:-1]
matr[i, -k:] -= bb
# colocation matrix
for i in range(n):
xval = x[i]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _dierckx.evaluate_all_bspl(t, k, xval, left)
matr[i + k - 1, left-k:left+1] = bb
# RHS
b = np.r_[[0] * (k - 1), y]
c = solve(matr, b)
return c
def _handle_lhs_derivatives(t, k, xval, ab, kl, ku, deriv_ords, offset=0):
""" Fill in the entries of the colocation matrix corresponding to known
derivatives at `xval`.
The colocation matrix is in the banded storage, as prepared by _coloc.
No error checking.
Parameters
----------
t : ndarray, shape (nt + k + 1,)
knots
k : integer
B-spline order
xval : float
The value at which to evaluate the derivatives.
ab : ndarray, shape(2*kl + ku + 1, nt), Fortran order
B-spline colocation matrix.
This argument is modified *in-place*.
kl : integer
Number of lower diagonals of ab.
ku : integer
Number of upper diagonals of ab.
deriv_ords : 1D ndarray
Orders of derivatives known at xval
offset : integer, optional
Skip this many rows of the matrix ab.
"""
# find where `xval` is in the knot vector, `t`
left = _dierckx.find_interval(t, k, float(xval), k, False)
# compute and fill in the derivatives @ xval
for row in range(deriv_ords.shape[0]):
nu = deriv_ords[row]
wrk = _dierckx.evaluate_all_bspl(t, k, xval, left, nu)
# if A were a full matrix, it would be just
# ``A[row + offset, left-k:left+1] = bb``.
for a in range(k+1):
clmn = left - k + a
ab[kl + ku + offset + row - clmn, clmn] = wrk[a]
def _make_periodic_spline(x, y, t, k, axis, *, xp):
'''
Compute the (coefficients of) interpolating B-spline with periodic
boundary conditions.
Parameters
----------
x : array_like, shape (n,)
Abscissas.
y : array_like, shape (n,)
Ordinates.
k : int
B-spline degree.
t : array_like, shape (n + 2 * k,).
Knots taken on a circle, ``k`` on the left and ``k`` on the right
of the vector ``x``.
Returns
-------
b : `BSpline` object
A `BSpline` object of the degree ``k`` and with knots ``t``.
Notes
-----
The original system is formed by ``n + k - 1`` equations where the first
``k - 1`` of them stand for the ``k - 1`` derivatives continuity on the
edges while the other equations correspond to an interpolating case
(matching all the input points). Due to a special form of knot vector, it
can be proved that in the original system the first and last ``k``
coefficients of a spline function are the same, respectively. It follows
from the fact that all ``k - 1`` derivatives are equal term by term at ends
and that the matrix of the original system of linear equations is
non-degenerate. So the number of equations can be reduced to ``n - 1``
(the first ``k - 1`` equations can be dropped). Another trick of this
implementation is a cyclic shift of the B-spline values, which is possible
because ``k`` of the unknown coefficients are equal. This yields a system
matrix with ``k`` diagonals plus upper-right and lower-left corner blocks,
which allows using the Woodbury formula to speed up the computations.
'''
n = y.shape[0]
extradim = prod(y.shape[1:])
y_new = y.reshape(n, extradim)
c = np.zeros((n + k - 1, extradim))
# n <= k case is solved with full matrix
if n <= k:
for i in range(extradim):
c[:, i] = _make_interp_per_full_matr(x, y_new[:, i], t, k)
c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
nt = len(t) - k - 1
# size of block elements
kul = int(k / 2)
# kl = ku = k
ab = np.zeros((3 * k + 1, nt), dtype=np.float64, order='F')
# upper right and lower left blocks
ur = np.zeros((kul, kul))
ll = np.zeros_like(ur)
# `offset` is made to shift all the non-zero elements to the end of the
# matrix
# NB: 1. drop the last element of `x` because `x[0] = x[-1] + T` & `y[0] == y[-1]`
# 2. pass ab.T to _coloc to make it C-ordered; below it'll be fed to banded
# LAPACK, which needs F-ordered arrays
_dierckx._coloc(x[:-1], t, k, ab.T, k)
# remove zeros before the matrix
ab = ab[-k - (k + 1) % 2:, :]
# The least elements in rows (except repetitions) are diagonals
# of block matrices. Upper right matrix is an upper triangular
# matrix while lower left is a lower triangular one.
for i in range(kul):
ur += np.diag(ab[-i - 1, i: kul], k=i)
ll += np.diag(ab[i, -kul - (k % 2): n - 1 + 2 * kul - i], k=-i)
# remove elements that occur in the last point
# (first and last points are equivalent)
A = ab[:, kul: -k + kul]
for i in range(extradim):
cc = _woodbury_algorithm(A, ur, ll, y_new[:, i][:-1], k)
c[:, i] = np.concatenate((cc[-kul:], cc, cc[:kul + k % 2]))
c = np.ascontiguousarray(c.reshape((n + k - 1,) + y.shape[1:]))
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, extrapolate='periodic', axis=axis)
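# Small sketch of the periodic boundary condition described in the Notes above
# (illustration only): the first ``k - 1`` derivatives agree at both ends.
# >>> import numpy as np
# >>> from scipy.interpolate import make_interp_spline
# >>> x = np.linspace(0, 2 * np.pi, 15)
# >>> b = make_interp_spline(x, np.sin(x), k=3, bc_type='periodic')
# >>> np.allclose([b(x[0], 1), b(x[0], 2)], [b(x[-1], 1), b(x[-1], 2)])
# True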
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
check_finite=True):
"""Create an interpolating B-spline with specified degree and boundary conditions.
Parameters
----------
x : array_like, shape (n,)
Abscissas.
y : array_like, shape (n, ...)
Ordinates.
k : int, optional
B-spline degree. Default is cubic, ``k = 3``.
t : array_like, shape (nt + k + 1,), optional.
Knots.
The number of knots needs to agree with the number of data points and
the number of derivatives at the edges. Specifically, ``nt - n`` must
equal ``len(deriv_l) + len(deriv_r)``.
bc_type : 2-tuple or None
Boundary conditions.
Default is None, which means choosing the boundary conditions
automatically. Otherwise, it must be a length-two tuple where the first
element (``deriv_l``) sets the boundary conditions at ``x[0]`` and
the second element (``deriv_r``) sets the boundary conditions at
``x[-1]``. Each of these must be an iterable of pairs
``(order, value)`` which gives the values of derivatives of specified
orders at the given edge of the interpolation interval.
Alternatively, the following string aliases are recognized:
* ``"clamped"``: The first derivatives at the ends are zero. This is
equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``.
* ``"natural"``: The second derivatives at ends are zero. This is
equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``.
* ``"not-a-knot"`` (default): The first and second segments are the
same polynomial. This is equivalent to having ``bc_type=None``.
* ``"periodic"``: The values and the first ``k-1`` derivatives at the
ends are equivalent.
axis : int, optional
Interpolation axis. Default is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
Returns
-------
b : `BSpline` object
A `BSpline` object of the degree ``k`` and with knots ``t``.
See Also
--------
BSpline : base class representing the B-spline objects
CubicSpline : a cubic spline in the polynomial basis
make_lsq_spline : a similar factory function for spline fitting
UnivariateSpline : a wrapper over FITPACK spline fitting routines
splrep : a wrapper over FITPACK spline fitting routines
Examples
--------
Use cubic interpolation on Chebyshev nodes:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> def cheb_nodes(N):
... jj = 2.*np.arange(N) + 1
... x = np.cos(np.pi * jj / 2 / N)[::-1]
... return x
>>> x = cheb_nodes(20)
>>> y = np.sqrt(1 - x**2)
>>> from scipy.interpolate import BSpline, make_interp_spline
>>> b = make_interp_spline(x, y)
>>> np.allclose(b(x), y)
True
Note that the default is a cubic spline with a not-a-knot boundary condition
>>> b.k
3
Here we use a 'natural' spline, with zero 2nd derivatives at edges:
>>> l, r = [(2, 0.0)], [(2, 0.0)]
>>> b_n = make_interp_spline(x, y, bc_type=(l, r)) # or, bc_type="natural"
>>> np.allclose(b_n(x), y)
True
>>> x0, x1 = x[0], x[-1]
>>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
True
Interpolation of parametric curves is also supported. As an example, we
compute a discretization of a snail curve in polar coordinates
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.3 + np.cos(phi)
>>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates
Build an interpolating curve, parameterizing it by the angle
>>> spl = make_interp_spline(phi, np.c_[x, y])
Evaluate the interpolant on a finer grid (note that we transpose the result
to unpack it into a pair of x- and y-arrays)
>>> phi_new = np.linspace(0, 2.*np.pi, 100)
>>> x_new, y_new = spl(phi_new).T
Plot the result
>>> plt.plot(x, y, 'o')
>>> plt.plot(x_new, y_new, '-')
>>> plt.show()
Build a B-spline curve with 2 dimensional y
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.array([np.sin(x), np.cos(x)])
Periodic condition is satisfied because y coordinates of points on the ends
are equivalent
>>> ax = plt.axes(projection='3d')
>>> xx = np.linspace(0, 2*np.pi, 100)
>>> bspl = make_interp_spline(x, y, k=5, bc_type='periodic', axis=1)
>>> ax.plot3D(xx, *bspl(xx))
>>> ax.scatter3D(x, *y, color='red')
>>> plt.show()
"""
# convert string aliases for the boundary conditions
if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic':
deriv_l, deriv_r = None, None
elif isinstance(bc_type, str):
deriv_l, deriv_r = bc_type, bc_type
else:
try:
deriv_l, deriv_r = bc_type
except TypeError as e:
raise ValueError(f"Unknown boundary condition: {bc_type}") from e
xp = array_namespace(x, y, t)
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
axis = normalize_axis_index(axis, y.ndim)
y = np.moveaxis(y, axis, 0) # now internally interp axis is zero
# sanity check the input
if bc_type == 'periodic' and not np.allclose(y[0], y[-1], atol=1e-15):
raise ValueError("First and last points does not match while "
"periodic case expected")
if x.size != y.shape[0]:
raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
if np.any(x[1:] == x[:-1]):
raise ValueError("Expect x to not have duplicates")
if x.ndim != 1 or np.any(x[1:] < x[:-1]):
raise ValueError("Expect x to be a 1D strictly increasing sequence.")
# special-case k=0 right away
if k == 0:
if any(_ is not None for _ in (t, deriv_l, deriv_r)):
raise ValueError("Too much info for k=0: t and bc_type can only "
"be None.")
t = np.r_[x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
# special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
if k == 1 and t is None:
if not (deriv_l is None and deriv_r is None):
raise ValueError("Too much info for k=1: bc_type can only be None.")
t = np.r_[x[0], x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
k = operator.index(k)
if bc_type == 'periodic' and t is not None:
raise NotImplementedError("For periodic case t is constructed "
"automatically and can not be passed "
"manually")
# come up with a sensible knot vector, if needed
if t is None:
if deriv_l is None and deriv_r is None:
if bc_type == 'periodic':
t = _periodic_knots(x, k)
else:
t = _not_a_knot(x, k)
else:
t = _augknt(x, k)
t = _as_float_array(t, check_finite)
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] < t[:-1]):
raise ValueError("Expect t to be a 1-D sorted array_like.")
if t.size < x.size + k + 1:
raise ValueError(f"Got {t.size} knots, need at least {x.size + k + 1}.")
if (x[0] < t[k]) or (x[-1] > t[-k]):
raise ValueError(f'Out of bounds w/ x = {x}.')
if bc_type == 'periodic':
return _make_periodic_spline(x, y, t, k, axis, xp=xp)
# Here : deriv_l, r = [(nu, value), ...]
deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l)
nleft = deriv_l_ords.shape[0]
deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r)
nright = deriv_r_ords.shape[0]
if not all(0 <= i <= k for i in deriv_l_ords):
raise ValueError(f"Bad boundary conditions at {x[0]}.")
if not all(0 <= i <= k for i in deriv_r_ords):
raise ValueError(f"Bad boundary conditions at {x[-1]}.")
# have `n` conditions for `nt` coefficients; need nt-n derivatives
n = x.size
nt = t.size - k - 1
if nt - n != nleft + nright:
raise ValueError("The number of derivatives at boundaries does not "
f"match: expected {nt-n}, got {nleft}+{nright}")
# bail out if the `y` array is zero-sized
if y.size == 0:
c = np.zeros((nt,) + y.shape[1:], dtype=float)
return BSpline.construct_fast(t, c, k, axis=axis)
# set up the LHS: the colocation matrix + derivatives at boundaries
# NB: ab is in F order for banded LAPACK; _coloc needs C-ordered arrays,
# so pass ab.T into _coloc
kl = ku = k
ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float64, order='F')
_dierckx._coloc(x, t, k, ab.T, nleft)
if nleft > 0:
_handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)
if nright > 0:
_handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,
offset=nt-nright)
# set up the RHS: values to interpolate (+ derivative values, if any)
extradim = prod(y.shape[1:])
rhs = np.empty((nt, extradim), dtype=y.dtype)
if nleft > 0:
rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
rhs[nleft:nt - nright] = y.reshape(-1, extradim)
if nright > 0:
rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)
# solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
if check_finite:
ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
lu, piv, c, info = gbsv(kl, ku, ab, rhs,
overwrite_ab=True, overwrite_b=True)
if info > 0:
raise LinAlgError("Colocation matrix is singular.")
elif info < 0:
raise ValueError(f'illegal value in {-info}-th argument of internal gbsv')
c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
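# A further hedged example for the ``(order, value)`` form of ``bc_type``
# documented above (data and values are made up for illustration): prescribe
# the first derivative at both edges and check that it is honoured.
# >>> import numpy as np
# >>> from scipy.interpolate import make_interp_spline
# >>> x = np.linspace(0, 1, 11)
# >>> b = make_interp_spline(x, x**2, k=3, bc_type=([(1, 0.0)], [(1, 2.0)]))
# >>> np.allclose([b(0.0, 1), b(1.0, 1)], [0.0, 2.0])
# True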
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True, *, method="qr"):
r"""Create a smoothing B-spline satisfying the Least SQuares (LSQ) criterion.
The result is a linear combination
.. math::
S(x) = \sum_j c_j B_j(x; t)
of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes
.. math::
\sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2
Parameters
----------
x : array_like, shape (m,)
Abscissas.
y : array_like, shape (m, ...)
Ordinates.
t : array_like, shape (n + k + 1,).
Knots.
Knots and data points must satisfy Schoenberg-Whitney conditions.
k : int, optional
B-spline degree. Default is cubic, ``k = 3``.
w : array_like, shape (m,), optional
Weights for spline fitting. Must be positive. If ``None``,
then weights are all equal.
Default is ``None``.
axis : int, optional
Interpolation axis. Default is zero.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
method : str, optional
Method for solving the linear LSQ problem. Allowed values are "norm-eq"
(Explicitly construct and solve the normal system of equations), and
"qr" (Use the QR factorization of the design matrix).
Default is "qr".
Returns
-------
b : `BSpline` object
A `BSpline` object of the degree ``k`` with knots ``t``.
See Also
--------
BSpline : base class representing the B-spline objects
make_interp_spline : a similar factory function for interpolating splines
LSQUnivariateSpline : a FITPACK-based spline fitting routine
splrep : a FITPACK-based fitting routine
Notes
-----
The number of data points must be larger than the spline degree ``k``.
Knots ``t`` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
Generate some noisy data:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(50)
Now fit a smoothing cubic spline with a pre-defined internal knots.
Here we make the knot vector (k+1)-regular by adding boundary knots:
>>> from scipy.interpolate import make_lsq_spline, BSpline
>>> t = [-1, 0, 1]
>>> k = 3
>>> t = np.r_[(x[0],)*(k+1),
... t,
... (x[-1],)*(k+1)]
>>> spl = make_lsq_spline(x, y, t, k)
For comparison, we also construct an interpolating spline for the same
set of data:
>>> from scipy.interpolate import make_interp_spline
>>> spl_i = make_interp_spline(x, y)
Plot both:
>>> xs = np.linspace(-3, 3, 100)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
>>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
>>> plt.legend(loc='best')
>>> plt.show()
**NaN handling**: If the input arrays contain ``nan`` values, the result is
not useful since the underlying spline fitting routines cannot deal with
``nan``. A workaround is to use zero weights for not-a-number data points:
>>> y[8] = np.nan
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> tck = make_lsq_spline(x, y, t, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
"""
xp = array_namespace(x, y, t, w)
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
t = _as_float_array(t, check_finite)
if w is not None:
w = _as_float_array(w, check_finite)
else:
w = np.ones_like(x)
k = operator.index(k)
axis = normalize_axis_index(axis, y.ndim)
y = np.moveaxis(y, axis, 0) # now internally interp axis is zero
if not y.flags.c_contiguous:
# C routines in _dierckx currently require C contiguity
y = y.copy(order='C')
if x.ndim != 1:
raise ValueError("Expect x to be a 1-D sequence.")
if x.shape[0] < k+1:
raise ValueError("Need more x points.")
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
raise ValueError("Expect t to be a 1D strictly increasing sequence.")
if x.size != y.shape[0]:
raise ValueError(f'Shapes of x {x.shape} and y {y.shape} are incompatible')
if k > 0 and np.any((x < t[k]) | (x > t[-k])):
raise ValueError(f'Out of bounds w/ x = {x}.')
if x.size != w.size:
raise ValueError(f'Shapes of x {x.shape} and w {w.shape} are incompatible')
if method == "norm-eq" and np.any(x[1:] - x[:-1] <= 0):
raise ValueError("Expect x to be a 1D strictly increasing sequence.")
if method == "qr" and any(x[1:] - x[:-1] < 0):
raise ValueError("Expect x to be a 1D non-decreasing sequence.")
# number of coefficients
n = t.size - k - 1
# complex y: view as float, preserve the length
was_complex = y.dtype.kind == 'c'
yy = y.view(float)
if was_complex and y.ndim == 1:
yy = yy.reshape(y.shape[0], 2)
# multiple r.h.s
extradim = prod(yy.shape[1:])
yy = yy.reshape(-1, extradim)
if method == "norm-eq":
# construct A.T @ A and rhs with A the colocation matrix, and
# rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
lower = True
ab = np.zeros((k+1, n), dtype=np.float64, order='F')
rhs = np.zeros((n, extradim), dtype=np.float64)
_dierckx._norm_eq_lsq(x, t, k,
yy,
w,
ab.T, rhs)
# undo complex -> float and flattening the trailing dims
if was_complex:
rhs = rhs.view(complex)
rhs = rhs.reshape((n,) + y.shape[1:])
# have observation matrix & rhs, can solve the LSQ problem
cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
check_finite=check_finite)
m = rhs.shape[0]
c = cho_solve_banded((cho_decomp, lower), rhs.reshape(m, -1), overwrite_b=True,
check_finite=check_finite).reshape(rhs.shape)
elif method == "qr":
_, _, c, _, _ = _lsq_solve_qr(x, yy, t, k, w)
if was_complex:
c = c.view(complex)
else:
raise ValueError(f"Unknown {method =}.")
# restore the shape of `c` for both single and multiple r.h.s.
c = c.reshape((n,) + y.shape[1:])
c = np.ascontiguousarray(c)
t, c = xp.asarray(t), xp.asarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
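# Hedged sketch of the ``method`` parameter documented above (assumption: both
# solver paths minimize the same weighted LSQ functional, so on a
# well-conditioned problem the coefficients should agree to rounding).
# >>> import numpy as np
# >>> from scipy.interpolate import make_lsq_spline
# >>> x = np.linspace(0, 1, 20); y = np.sin(2 * np.pi * x)
# >>> k = 3
# >>> t = np.r_[(x[0],)*(k+1), [0.25, 0.5, 0.75], (x[-1],)*(k+1)]
# >>> b_qr = make_lsq_spline(x, y, t, k, method="qr")
# >>> b_ne = make_lsq_spline(x, y, t, k, method="norm-eq")
# >>> np.allclose(b_qr.c, b_ne.c)
# True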
######################
# LSQ spline helpers #
######################
def _lsq_solve_qr_for_root_rati_periodic(x, y, t, k, w):
"""Solve for the LSQ spline coeffs given x, y and knots.
`y` is always 2D: for 1D data, the shape is ``(m, 1)``.
`w` is always 1D: one weight value per `x` value.
"""
y_w = y * w[:, None]
# Ref: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L221-L238
R, H1, H2, offset, nc = _dierckx.data_matrix_periodic(x, t, k, w, False)
# Ref: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L239-L314
A1, A2, Z, p, _ = _dierckx.qr_reduce_periodic(
R, H1, H2, offset, nc, y_w, k,
len(t), True
) # modifies arguments in-place
# Ref: https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpbacp.f
c, residuals, _ = _dierckx.fpbacp(A1, A2, Z, k, k, x, y, t, w)
return R, A1, A2, Z, y_w, c, p, residuals
def _lsq_solve_qr(x, y, t, k, w, periodic=False):
"""Solve for the LSQ spline coeffs given x, y and knots.
`y` is always 2D: for 1D data, the shape is ``(m, 1)``.
`w` is always 1D: one weight value per `x` value.
"""
y_w = y * w[:, None]
if not periodic:
A, offset, nc = _dierckx.data_matrix(x, t, k, w)
_dierckx.qr_reduce(A, offset, nc, y_w) # modifies arguments in-place
c, residuals, fp = _dierckx.fpback(A, nc, x, y, t, k, w, y_w)
return A, y_w, c, fp, residuals
else:
# Ref: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L221-L238
R, H1, H2, offset, nc = _dierckx.data_matrix_periodic(x, t, k, w, False)
# Ref: https://github.com/scipy/scipy/blob/maintenance/1.16.x/scipy/interpolate/fitpack/fpperi.f#L239-L314
A1, A2, Z, fp = _dierckx.qr_reduce_periodic(
R, H1, H2, offset, nc, y_w, k,
len(t), False) # modifies arguments in-place
# Ref: https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpbacp.f
c, residuals, _ = _dierckx.fpbacp(A1, A2, Z, k, k, x, y, t, w)
return R, y_w, c, fp, residuals
#############################
# Smoothing spline helpers #
#############################
def _compute_optimal_gcv_parameter(X, wE, y, w):
"""
Returns an optimal regularization parameter from the GCV criterion [1].
Parameters
----------
X : array, shape (5, n)
5 bands of the design matrix ``X`` stored in LAPACK banded storage.
wE : array, shape (5, n)
5 bands of the penalty matrix :math:`W^{-1} E` stored in LAPACK banded
storage.
y : array, shape (n,)
Ordinates.
w : array, shape (n,)
Vector of weights.
Returns
-------
lam : float
Regularization parameter that is optimal from the point of view of
the GCV criterion.
Notes
-----
No checks are performed.
References
----------
.. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
for observational data, Philadelphia, Pennsylvania: Society for
Industrial and Applied Mathematics, 1990, pp. 45-65.
:doi:`10.1137/1.9781611970128`
"""
def compute_banded_symmetric_XT_W_Y(X, w, Y):
"""
Assuming that the product :math:`X^T W Y` is symmetric and both ``X``
and ``Y`` are 5-banded, compute the unique bands of the product.
Parameters
----------
X : array, shape (5, n)
5 bands of the matrix ``X`` stored in LAPACK banded storage.
w : array, shape (n,)
Array of weights
Y : array, shape (5, n)
5 bands of the matrix ``Y`` stored in LAPACK banded storage.
Returns
-------
res : array, shape (4, n)
The result of the product :math:`X^T Y` stored in the banded way.
Notes
-----
As far as the matrices ``X`` and ``Y`` are 5-banded, their product
:math:`X^T W Y` is 7-banded. It is also symmetric, so we can store only
unique diagonals.
"""
# compute W Y
W_Y = np.copy(Y)
W_Y[2] *= w
for i in range(2):
W_Y[i, 2 - i:] *= w[:-2 + i]
W_Y[3 + i, :-1 - i] *= w[1 + i:]
n = X.shape[1]
res = np.zeros((4, n))
for i in range(n):
for j in range(min(n-i, 4)):
res[-j-1, i + j] = sum(X[j:, i] * W_Y[:5-j, i + j])
return res
def compute_b_inv(A):
"""
Inverse 3 central bands of matrix :math:`A=U^T D^{-1} U` assuming that
``U`` is a unit upper triangular banded matrix using an algorithm
proposed in [1].
Parameters
----------
A : array, shape (4, n)
Matrix to inverse, stored in LAPACK banded storage.
Returns
-------
B : array, shape (4, n)
3 unique bands of the symmetric matrix that is an inverse to ``A``.
The first row is filled with zeros.
Notes
-----
The algorithm is based on the Cholesky decomposition and, therefore,
if the matrix ``A`` is close to not being positive definite, the function
raises a LinAlgError.
Both matrices ``A`` and ``B`` are stored in LAPACK banded storage.
References
----------
.. [1] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
spline functions," Numerische Mathematik, vol. 47, no. 1,
pp. 99-106, 1985.
:doi:`10.1007/BF01389878`
"""
def find_b_inv_elem(i, j, U, D, B):
rng = min(3, n - i - 1)
rng_sum = 0.
if j == 0:
# use 2-nd formula from [1]
for k in range(1, rng + 1):
rng_sum -= U[-k - 1, i + k] * B[-k - 1, i + k]
rng_sum += D[i]
B[-1, i] = rng_sum
else:
# use 1-st formula from [1]
for k in range(1, rng + 1):
diag = abs(k - j)
ind = i + min(k, j)
rng_sum -= U[-k - 1, i + k] * B[-diag - 1, ind + diag]
B[-j - 1, i + j] = rng_sum
U = cholesky_banded(A)
for i in range(2, 5):
U[-i, i-1:] /= U[-1, :-i+1]
D = 1. / (U[-1])**2
U[-1] /= U[-1]
n = U.shape[1]
B = np.zeros(shape=(4, n))
for i in range(n - 1, -1, -1):
for j in range(min(3, n - i - 1), -1, -1):
find_b_inv_elem(i, j, U, D, B)
# the first row contains garbage and should be removed
B[0] = [0.] * n
return B
def _gcv(lam, X, XtWX, wE, XtE, y):
r"""
Computes the generalized cross-validation criterion [1].
Parameters
----------
lam : float, (:math:`\lambda \geq 0`)
Regularization parameter.
X : array, shape (5, n)
Matrix is stored in LAPACK banded storage.
XtWX : array, shape (4, n)
Product :math:`X^T W X` stored in LAPACK banded storage.
wE : array, shape (5, n)
Matrix :math:`W^{-1} E` stored in LAPACK banded storage.
XtE : array, shape (4, n)
Product :math:`X^T E` stored in LAPACK banded storage.
Returns
-------
res : float
Value of the GCV criterion with the regularization parameter
:math:`\lambda`.
Notes
-----
The criterion is computed from formula (1.3.2) [3]:
.. math::
GCV(\lambda) = \dfrac{1}{n} \sum\limits_{k = 1}^{n} \dfrac{\left(
y_k - f_{\lambda}(x_k) \right)^2}{\left( 1 - \mathrm{tr}(A)/n \right)^2}.
The criterion is discussed in section 1.3 [3].
The numerator is computed using (2.2.4) [3] and the denominator is
computed using an algorithm from [2] (see in the ``compute_b_inv``
function).
References
----------
.. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models
for observational data, Philadelphia, Pennsylvania: Society for
Industrial and Applied Mathematics, 1990, pp. 45-65.
:doi:`10.1137/1.9781611970128`
.. [2] M. F. Hutchinson and F. R. de Hoog, "Smoothing noisy data with
spline functions," Numerische Mathematik, vol. 47, no. 1,
pp. 99-106, 1985.
:doi:`10.1007/BF01389878`
.. [3] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
BSc thesis, 2022. Might be available (in Russian)
`here <https://www.hse.ru/ba/am/students/diplomas/620910604>`_
"""
# Compute the numerator from (2.2.4) [3]
n = X.shape[1]
c = solve_banded((2, 2), X + lam * wE, y)
res = np.zeros(n)
# compute ``W^{-1} E c`` with respect to banded-storage of ``E``
tmp = wE * c
for i in range(n):
for j in range(max(0, i - n + 3), min(5, i + 3)):
res[i] += tmp[j, i + 2 - j]
numer = np.linalg.norm(lam * res)**2 / n
# compute the denominator
lhs = XtWX + lam * XtE
try:
b_banded = compute_b_inv(lhs)
# compute the trace of the product b_banded @ XtX
tr = b_banded * XtWX
tr[:-1] *= 2
# find the denominator
denom = (1 - sum(sum(tr)) / n)**2
except LinAlgError:
# cholesky decomposition cannot be performed
raise ValueError('Seems like the problem is ill-posed')
res = numer / denom
return res
n = X.shape[1]
XtWX = compute_banded_symmetric_XT_W_Y(X, w, X)
XtE = compute_banded_symmetric_XT_W_Y(X, w, wE)
if y.ndim == 1:
gcv_est = minimize_scalar(
_gcv, bounds=(0, n), method='Bounded', args=(X, XtWX, wE, XtE, y)
)
if gcv_est.success:
return gcv_est.x
raise ValueError(f"Unable to find minimum of the GCV "
f"function: {gcv_est.message}")
elif y.ndim == 2:
gcv_est = np.empty(y.shape[1])
for i in range(y.shape[1]):
est = minimize_scalar(
_gcv, bounds=(0, n), method='Bounded', args=(X, XtWX, wE, XtE, y[:, i])
)
if est.success:
gcv_est[i] = est.x
else:
raise ValueError(f"Unable to find minimum of the GCV "
f"function: {gcv_est.message}")
return gcv_est
else:
# trailing dims must have been flattened already.
raise RuntimeError("Internal error. Please report it to scipy developers.")
def _coeff_of_divided_diff(x):
"""
Returns the coefficients of the divided difference.
Parameters
----------
x : array, shape (n,)
Array which is used for the computation of divided difference.
Returns
-------
res : array_like, shape (n,)
Coefficients of the divided difference.
Notes
-----
Vector ``x`` should have unique elements, otherwise a division-by-zero
error might be raised.
No checks are performed.
"""
n = x.shape[0]
res = np.zeros(n)
for i in range(n):
pp = 1.
for k in range(n):
if k != i:
pp *= (x[i] - x[k])
res[i] = 1. / pp
return res
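# Quick illustration (not part of the original module): for three points the
# coefficients are the classic second-divided-difference weights 1/2, -1, 1/2.
# >>> import numpy as np
# >>> _coeff_of_divided_diff(np.array([0., 1., 2.])).tolist()
# [0.5, -1.0, 0.5]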
@xp_capabilities(cpu_only=True, jax_jit=False, allow_dask_compute=True)
def make_smoothing_spline(x, y, w=None, lam=None, *, axis=0):
r"""
Create a smoothing B-spline satisfying the Generalized Cross Validation (GCV) criterion.
Compute the (coefficients of) smoothing cubic spline function using
``lam`` to control the tradeoff between the amount of smoothness of the
curve and its proximity to the data. If ``lam`` is None, the GCV
criterion [1] is used to find it.
A smoothing spline is found as a solution to the regularized weighted
linear regression problem:
.. math::
\sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2 +
\lambda\int\limits_{x_1}^{x_n} (f^{(2)}(u))^2 d u
where :math:`f` is a spline function, :math:`w` is a vector of weights and
:math:`\lambda` is a regularization parameter.
If ``lam`` is None, we use the GCV criterion to find an optimal
regularization parameter, otherwise we solve the regularized weighted
linear regression problem with given parameter. The parameter controls
the tradeoff in the following way: the larger the parameter becomes, the
smoother the function gets.
Parameters
----------
x : array_like, shape (n,)
Abscissas. `n` must be at least 5.
y : array_like, shape (n, ...)
Ordinates. `n` must be at least 5.
w : array_like, shape (n,), optional
Vector of weights. Default is ``np.ones_like(x)``.
lam : float, (:math:`\lambda \geq 0`), optional
Regularization parameter. If ``lam`` is None, then it is found from
the GCV criterion. Default is None.
axis : int, optional
The data axis. Default is zero.
The assumption is that ``y.shape[axis] == n``, and all other axes of ``y``
are batching axes.
Returns
-------
func : `BSpline` object
An object representing a spline in the B-spline basis
as a solution of the problem of smoothing splines using
the GCV criterion [1] if ``lam`` is None, otherwise using the
given parameter ``lam``.
Notes
-----
This algorithm is a clean room reimplementation of the algorithm
introduced by Woltring in FORTRAN [2]. The original version cannot be used
in SciPy source code because of the license issues. The details of the
reimplementation are discussed here (available only in Russian) [4].
If the vector of weights ``w`` is None, we assume that all the points are
equal in terms of weights, and the vector of weights is a vector of ones.
Note that in weighted residual sum of squares, weights are not squared:
:math:`\sum\limits_{i=1}^n w_i\lvert y_i - f(x_i) \rvert^2` while in
``splrep`` the sum is built from the squared weights.
In cases when the initial problem is ill-posed (for example, the product
:math:`X^T W X` where :math:`X` is a design matrix is not a positive
definite matrix) a ValueError is raised.
References
----------
.. [1] G. Wahba, "Estimating the smoothing parameter" in Spline models for
observational data, Philadelphia, Pennsylvania: Society for Industrial
and Applied Mathematics, 1990, pp. 45-65.
:doi:`10.1137/1.9781611970128`
.. [2] H. J. Woltring, A Fortran package for generalized, cross-validatory
spline smoothing and differentiation, Advances in Engineering
Software, vol. 8, no. 2, pp. 104-113, 1986.
:doi:`10.1016/0141-1195(86)90098-7`
.. [3] T. Hastie, J. Friedman, and R. Tisbshirani, "Smoothing Splines" in
The elements of Statistical Learning: Data Mining, Inference, and
prediction, New York: Springer, 2017, pp. 241-249.
:doi:`10.1007/978-0-387-84858-7`
.. [4] E. Zemlyanoy, "Generalized cross-validation smoothing splines",
BSc thesis, 2022.
`<https://www.hse.ru/ba/am/students/diplomas/620910604>`_ (in
Russian)
Examples
--------
Generate some noisy data
>>> import numpy as np
>>> np.random.seed(1234)
>>> n = 200
>>> def func(x):
... return x**3 + x**2 * np.sin(4 * x)
>>> x = np.sort(np.random.random_sample(n) * 4 - 2)
>>> y = func(x) + np.random.normal(scale=1.5, size=n)
Make a smoothing spline function
>>> from scipy.interpolate import make_smoothing_spline
>>> spl = make_smoothing_spline(x, y)
Plot both
>>> import matplotlib.pyplot as plt
>>> grid = np.linspace(x[0], x[-1], 400)
>>> plt.plot(x, y, '.')
>>> plt.plot(grid, spl(grid), label='Spline')
>>> plt.plot(grid, func(grid), label='Original function')
>>> plt.legend(loc='best')
>>> plt.show()
""" # noqa:E501
xp = array_namespace(x, y)
x = np.ascontiguousarray(x, dtype=float)
y = np.ascontiguousarray(y, dtype=float)
if any(x[1:] - x[:-1] <= 0):
raise ValueError('``x`` should be an ascending array')
if x.ndim != 1 or x.shape[0] != y.shape[axis]:
raise ValueError(f'``x`` should be 1D and {x.shape = } == {y.shape = }')
if w is None:
w = np.ones(len(x))
else:
w = np.ascontiguousarray(w)
if any(w <= 0):
raise ValueError('Invalid vector of weights')
t = np.r_[[x[0]] * 3, x, [x[-1]] * 3]
n = x.shape[0]
if n <= 4:
raise ValueError('``x`` and ``y`` length must be at least 5')
# Internals assume that the data axis is the zero-th axis
axis = normalize_axis_index(axis, y.ndim)
y = np.moveaxis(y, axis, 0)
# flatten the trailing axes of y to simplify further manipulations
y_shape1 = y.shape[1:]
if y_shape1 != ():
y = y.reshape((n, -1))
# It is known that the solution to the stated minimization problem exists
# and is a natural cubic spline with vector of knots equal to the unique
# elements of ``x`` [3], so we will solve the problem in the basis of
# natural splines.
# create design matrix in the B-spline basis
X_bspl = BSpline.design_matrix(x, t, 3)
# move from B-spline basis to the basis of natural splines using equations
# (2.1.7) [4]
# central elements
X = np.zeros((5, n))
for i in range(1, 4):
X[i, 2: -2] = X_bspl[i: i - 4, 3: -3][np.diag_indices(n - 4)]
# first elements
X[1, 1] = X_bspl[0, 0]
X[2, :2] = ((x[2] + x[1] - 2 * x[0]) * X_bspl[0, 0],
X_bspl[1, 1] + X_bspl[1, 2])
X[3, :2] = ((x[2] - x[0]) * X_bspl[1, 1], X_bspl[2, 2])
# last elements
X[1, -2:] = (X_bspl[-3, -3], (x[-1] - x[-3]) * X_bspl[-2, -2])
X[2, -2:] = (X_bspl[-2, -3] + X_bspl[-2, -2],
(2 * x[-1] - x[-2] - x[-3]) * X_bspl[-1, -1])
X[3, -2] = X_bspl[-1, -1]
# create penalty matrix and divide it by vector of weights: W^{-1} E
wE = np.zeros((5, n))
wE[2:, 0] = _coeff_of_divided_diff(x[:3]) / w[:3]
wE[1:, 1] = _coeff_of_divided_diff(x[:4]) / w[:4]
for j in range(2, n - 2):
wE[:, j] = (x[j+2] - x[j-2]) * _coeff_of_divided_diff(x[j-2:j+3])\
/ w[j-2: j+3]
wE[:-1, -2] = -_coeff_of_divided_diff(x[-4:]) / w[-4:]
wE[:-2, -1] = _coeff_of_divided_diff(x[-3:]) / w[-3:]
wE *= 6
if lam is None:
lam = _compute_optimal_gcv_parameter(X, wE, y, w)
elif lam < 0.:
raise ValueError('Regularization parameter should be non-negative')
# solve the initial problem in the basis of natural splines
if np.ndim(lam) == 0:
c = solve_banded((2, 2), X + lam * wE, y)
elif np.ndim(lam) == 1:
# XXX: solve_banded does not support batched `ab` matrices; loop manually
c = np.empty((n, lam.shape[0]))
for i in range(lam.shape[0]):
c[:, i] = solve_banded((2, 2), X + lam[i] * wE, y[:, i])
else:
# this should not happen, ever
raise RuntimeError("Internal error, please report it to SciPy developers.")
c = c.reshape((c.shape[0], *y_shape1))
# hack: these are c[0], c[1] etc, shape-compatible with np.r_ below
c0, c1 = c[0:1, ...], c[1:2, ...] # c[0], c[1]
cm0, cm1 = c[-1:-2:-1, ...], c[-2:-3:-1, ...] # c[-1], c[-2]
# move back to B-spline basis using equations (2.2.10) [4]
c_ = np.r_[c0 * (t[5] + t[4] - 2 * t[3]) + c1,
c0 * (t[5] - t[3]) + c1,
c[1: -1, ...],
cm0 * (t[-4] - t[-6]) + cm1,
cm0 * (2 * t[-4] - t[-5] - t[-6]) + cm1]
t, c_ = xp.asarray(t), xp.asarray(c_)
return BSpline.construct_fast(t, c_, 3, axis=axis)
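# Illustrative sketch of the ``lam`` tradeoff described above (assumed
# behaviour: larger ``lam`` penalizes curvature more and gives a smoother fit).
# >>> import numpy as np
# >>> from scipy.interpolate import make_smoothing_spline
# >>> rng = np.random.default_rng(0)
# >>> x = np.linspace(0, 1, 50)
# >>> y = np.sin(2 * np.pi * x) + 0.1 * rng.standard_normal(50)
# >>> spl_rough = make_smoothing_spline(x, y, lam=1e-8)   # follows the noise
# >>> spl_smooth = make_smoothing_spline(x, y, lam=1e-1)  # much smoother curve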
########################
# FITPACK look-alikes #
########################
def fpcheck(x, t, k, periodic=False):
"""Check consistency of data vector `x` and knot vector `t`.
Parameters
----------
x : array_like, shape (m,)
1D sorted array of data points.
t : array_like, shape (n,)
1D non-decreasing knot vector.
k : int
Degree of the spline.
periodic : bool, optional
Whether the spline is periodic. Default is False.
Raises
------
ValueError
If the configuration of `x`, `t`, and `k` violates any required condition.
"""
# This routine is a unified clone of the FITPACK Fortran routines `fpchec.f`
# and `fpchep.f`:
# - https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchec.f
# - https://github.com/scipy/scipy/blob/main/scipy/interpolate/fitpack/fpchep.f
#
# These routines verify the number and position of the knots t(j), j=1,...,n,
# of a spline of degree k, relative to the number and distribution of data points
# x(i), i=1,...,m. If all of the following conditions are fulfilled,
# validation passes.
#
# For non-periodic splines:
# 1) k+1 <= n-k-1 <= m
# 2) t(1) <= t(2) <= ... <= t(k+1)
# t(n-k) <= t(n-k+1) <= ... <= t(n)
# 3) t(k+1) < t(k+2) < ... < t(n-k)
# 4) t(k+1) <= x(i) <= t(n-k)
# 5) Schoenberg-Whitney conditions hold: there exists a subset y(j) such that
# t(j) < y(j) < t(j+k+1), for j = 1, ..., n-k-1
#
# For periodic splines:
# 1) k+1 <= n-k-1 <= m + k - 1
# 2) Same boundary knot monotonicity as above
# 3) Same strict interior knot increase as above
# 4) t(k+1) <= x(i) <= t(n-k)
# 5) Schoenberg-Whitney conditions must hold for *some periodic shift*
# of the data sequence; i.e. wrapped data points x(i) must satisfy
# t(j) < y(j) < t(j+k+1), j = k+1, ..., n-k-1
x = np.asarray(x)
t = np.asarray(t)
if x.ndim != 1 or t.ndim != 1:
raise ValueError(f"Expect `x` and `t` be 1D sequences. Got {x = } and {t = }")
m = x.shape[0]
n = t.shape[0]
nk1 = n - k - 1
# check condition no 1
if periodic:
# c 1) k+1 <= nk1 <= m+k-1
if not (k + 1 <= nk1 <= m + k - 1):
raise ValueError(f"Need k+1 <= n-k-1 <= m+k-1. Got {m = }, {n = }, {k = }")
else:
# c 1) k+1 <= n-k-1 <= m
if not (k + 1 <= nk1 <= m):
raise ValueError(f"Need k+1 <= n-k-1 <= m. Got {m = }, {n = } and {k = }.")
# check condition no 2
# c 2) t(1) <= t(2) <= ... <= t(k+1)
# c t(n-k) <= t(n-k+1) <= ... <= t(n)
if (t[:k+1] > t[1:k+2]).any():
raise ValueError(f"First k knots must be ordered; got {t = }.")
if (t[nk1:] < t[nk1-1:-1]).any():
raise ValueError(f"Last k knots must be ordered; got {t = }.")
# c check condition no 3
# c 3) t(k+1) < t(k+2) < ... < t(n-k)
if (t[k+1:n-k] <= t[k:n-k-1]).any():
raise ValueError(f"Internal knots must be distinct. Got {t = }.")
# c check condition no 4
# c 4) t(k+1) <= x(i) <= t(n-k)
# NB: FITPACK's fpchec only checks x[0] & x[-1], so we follow.
if (x[0] < t[k]) or (x[-1] > t[n-k-1]):
raise ValueError(f"Out of bounds: {x = } and {t = }.")
# c check condition no 5
# c 5) the conditions specified by Schoenberg and Whitney must hold
# c for at least one subset of data points y(j) such that
# c t(j) < y(j) < t(j+k+1)
# c
# c For non-periodic splines:
# c j = 1, 2, ..., n-k-1 (i.e., j in [1, n-k-1])
# c The data points must lie strictly inside some B-spline supports.
# c
# c For periodic splines:
# c j = k+1, ..., n-k-1
# c The condition must hold for a wrapped subset of the data points,
# c i.e., there exists a cyclic shift of the data such that
# c t(j) < x(i) < t(j+k+1)
# c holds for all j in that range. The test must account for the
# c periodic domain length: per = t(n-k) - t(k+1), and wrap around x(i)
# c as x(i) + per if needed.
mesg = f"Schoenberg-Whitney condition is violated with {t = } and {x =}."
if periodic:
per = t[n - k - 1] - t[k]
m1 = m - 1
for shift in range(1, m):
for j in range(k, nk1):
tj = t[j]
tl = t[j + k + 1]
found = False
for i in range(shift, shift + m1 + 1):
idx = i if i < m else i - m
xi = x[idx] + (0 if i < m else per)
if tj < xi < tl:
found = True
break
if not found:
break
else:
return
raise ValueError(mesg)
else:
if (x[0] >= t[k+1]) or (x[-1] <= t[n-k-2]):
raise ValueError(mesg)
m = x.shape[0]
l = k+1
nk3 = n - k - 3
if nk3 < 2:
return
for j in range(1, nk3+1):
tj = t[j]
l += 1
tl = t[l]
i = np.argmax(x > tj)
if i >= m-1:
raise ValueError(mesg)
if x[i] >= tl:
raise ValueError(mesg)
return
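# Minimal failure sketch (illustration; assumes k = 3 and a knot vector that is
# too short, so the count condition 1) above is violated).
# >>> import numpy as np
# >>> x = np.linspace(0, 1, 10)
# >>> t = np.array([0., 0., 0.5, 1., 1.])   # n = 5, so n - k - 1 = 1 < k + 1
# >>> fpcheck(x, t, 3)                      # doctest: +ELLIPSIS
# Traceback (most recent call last):
# ...
# ValueError: Need k+1 <= n-k-1 <= m. ...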
| BSpline |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/soundcloud/tests.py | {
"start": 248,
"end": 1547
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = SoundCloudProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"website": null,
"myspace_name": null,
"public_favorites_count": 0,
"followings_count": 1,
"full_name": "",
"urn": "soundcloud:users:22341947",
"city": null,
"track_count": 0,
"playlist_count": 0,
"discogs_name": null,
"private_tracks_count": 0,
"followers_count": 0,
"online": true,
"username": "user187631676",
"description": null,
"kind": "user",
"website_title": null,
"primary_email_confirmed": false,
"permalink_url": "http://soundcloud.com/user187631676",
"private_playlists_count": 0,
"permalink": "user187631676",
"country": null,
"uri": "https://api.soundcloud.com/users/22341947",
"avatar_url":
"https://a1.sndcdn.com/images/default_avatar_large.png?4b4189b",
"plan": "Free"
}""",
)
def get_expected_to_str(self):
return "user187631676"
| SoundCloudTests |
python | python-excel__xlrd | tests/test_formats.py | {
"start": 287,
"end": 2943
} | class ____(TestCase):
def setUp(self):
self.book = xlrd.open_workbook(from_sample('Formate.xls'), formatting_info=True)
self.sheet = self.book.sheet_by_name(u('Blätt1'))
def test_text_cells(self):
for row, name in enumerate([u('Huber'), u('Γcker'), u('Γcker')]):
cell = self.sheet.cell(row, 0)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_TEXT)
self.assertEqual(cell.value, name)
self.assertTrue(cell.xf_index > 0)
def test_date_cells(self):
# see also 'Dates in Excel spreadsheets' in the documentation
# convert: xldate_as_tuple(float, book.datemode) -> (year, month,
# day, hour, minutes, seconds)
for row, date in [(0, 2741.), (1, 38406.), (2, 32266.)]:
cell = self.sheet.cell(row, 1)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_DATE)
self.assertEqual(cell.value, date)
self.assertTrue(cell.xf_index > 0)
def test_time_cells(self):
# see also 'Dates in Excel spreadsheets' in the documentation
# convert: xldate_as_tuple(float, book.datemode) -> (year, month,
# day, hour, minutes, seconds)
for row, time in [(3, .273611), (4, .538889), (5, .741123)]:
cell = self.sheet.cell(row, 1)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_DATE)
self.assertAlmostEqual(cell.value, time, places=6)
self.assertTrue(cell.xf_index > 0)
def test_percent_cells(self):
for row, time in [(6, .974), (7, .124)]:
cell = self.sheet.cell(row, 1)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
self.assertAlmostEqual(cell.value, time, places=3)
self.assertTrue(cell.xf_index > 0)
def test_currency_cells(self):
for row, time in [(8, 1000.30), (9, 1.20)]:
cell = self.sheet.cell(row, 1)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
self.assertAlmostEqual(cell.value, time, places=2)
self.assertTrue(cell.xf_index > 0)
def test_get_from_merged_cell(self):
sheet = self.book.sheet_by_name(u('ÄÖÜ'))
cell = sheet.cell(2, 2)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_TEXT)
self.assertEqual(cell.value, 'MERGED CELLS')
self.assertTrue(cell.xf_index > 0)
def test_ignore_diagram(self):
sheet = self.book.sheet_by_name(u('BlΓ€tt3'))
cell = sheet.cell(0, 0)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
self.assertEqual(cell.value, 100)
self.assertTrue(cell.xf_index > 0)
| TestCellContent |
python | getsentry__sentry | tests/sentry/integrations/msteams/test_unlink_identity.py | {
"start": 385,
"end": 4321
} | class ____(TestCase):
def setUp(self) -> None:
super(TestCase, self).setUp()
self.user1 = self.create_user(is_superuser=False)
self.user2 = self.create_user(is_superuser=False)
self.org = self.create_organization(owner=None)
self.team = self.create_team(organization=self.org, members=[self.user1, self.user2])
self.login_as(self.user1)
self.integration = self.create_provider_integration(
provider="msteams",
name="Hogwarts",
external_id="1_50l3mnly_5w34r",
metadata={
"service_url": "https://smba.trafficmanager.net/amer",
"access_token": "3ld3rw4nd",
"expires_at": int(time.time()) + 86400,
},
)
self.create_organization_integration(
organization_id=self.org.id, integration=self.integration
)
self.idp = self.create_identity_provider(type="msteams", external_id="1_50l3mnly_5w34r")
self.conversation_id = "my_conversation_id"
access_json = {"expires_in": 86399, "access_token": "3ld3rw4nd"}
responses.add(
responses.POST,
"https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token",
json=access_json,
)
responses.add(
method=responses.POST,
url=f"https://smba.trafficmanager.net/amer/v3/conversations/{self.conversation_id}/activities",
status=200,
json={},
)
@responses.activate
def test_basic_flow(self) -> None:
teams_user_id = "my-teams-user-id"
Identity.objects.create(
user=self.user1, idp=self.idp, external_id=teams_user_id, status=IdentityStatus.VALID
)
unlink_url = build_unlinking_url(
self.conversation_id, "https://smba.trafficmanager.net/amer", teams_user_id
)
signed_params = unlink_url.split("/")[-2]
params = unsign(signed_params, salt=SALT)
assert params == {
"conversation_id": self.conversation_id,
"service_url": "https://smba.trafficmanager.net/amer",
"teams_user_id": teams_user_id,
}
resp = self.client.get(unlink_url)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/integrations/msteams/unlink-identity.html")
# Unlink identity of user
resp = self.client.post(unlink_url)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/integrations/msteams/unlinked.html")
identity = Identity.objects.filter(external_id=teams_user_id, user=self.user1)
assert len(identity) == 0
assert (
"Your Microsoft Teams identity has been unlinked to your Sentry account."
in responses.calls[1].request.body.decode("utf-8")
)
assert len(responses.calls) == 2
@responses.activate
def test_no_identity(self) -> None:
teams_user_id = "my-teams-user-id"
# identity for a different user
Identity.objects.create(
user=self.user2, idp=self.idp, external_id=teams_user_id, status=IdentityStatus.VALID
)
unlink_url = build_unlinking_url(
self.conversation_id, "https://smba.trafficmanager.net/amer", teams_user_id
)
resp = self.client.get(unlink_url)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/integrations/msteams/unlink-identity.html")
# Unlink identity of user
resp = self.client.post(unlink_url)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/integrations/msteams/no-identity.html")
identity = Identity.objects.filter(external_id=teams_user_id, user=self.user2)
assert len(identity) == 1
assert len(responses.calls) == 0
| MsTeamsIntegrationUnlinkIdentityTest |
python | pytorch__pytorch | torch/_inductor/kernel_inputs.py | {
"start": 378,
"end": 6091
} | class ____(ABC):
"""
Class to store and provide access to input nodes for kernels.
This class takes in a tuple of input nodes and provides methods to access
information about these nodes, such as their device type and device.
"""
def __init__(
self,
input_nodes: list[Any],
scalars: Optional[dict[str, Union[float, int]]] = None,
out_dtype: Optional[torch.dtype] = None,
):
"""
Initialize with a list of input nodes.
Args:
input_nodes: A list of input nodes to store
scalars: Optional mapping of scalar names to scalar values
out_dtype: Optional output dtype to store
"""
self._input_nodes = input_nodes
self._device_name: Optional[str] = None
self._scalars = scalars if scalars is not None else {}
self._out_dtype = out_dtype
assert len(input_nodes) > 0, "Expected at least one input node"
def nodes(self, reorder: Optional[Sequence[int]] = None) -> list[Any]:
"""
Return the stored input nodes, optionally reordered.
Args:
reorder: Optional sequence of indices to reorder the nodes.
For example, (2, 0, 1) would return nodes in that order.
Returns:
The tuple of input nodes, optionally reordered
"""
if reorder is None:
return self._input_nodes
assert len(self._input_nodes) == len(reorder), (
f"Reorder length mismatch: {len(self._input_nodes)} vs {len(reorder)}"
)
return [self._input_nodes[i] for i in reorder]
@property
def count(self) -> int:
"""
Get the number of input nodes.
Returns:
The number of input nodes
"""
return len(self._input_nodes)
@property
def device_type(self) -> Optional[str]:
"""
Get the device type of the first node.
Returns:
The device type (e.g., 'cuda', 'cpu')
"""
return ir.get_device_type(self._input_nodes[0])
def device(self) -> torch.device:
"""
Get the device of the first node.
Returns:
The device of the first node
"""
return self._input_nodes[0].get_device()
def device_name(self) -> Optional[str]:
"""
Get the device name information.
Returns:
A tuple of (gpu_name, vendor, model)
"""
if self._device_name is None:
device = self.device()
if self.device_type == "cuda":
device_properties = torch.cuda.get_device_properties(device)
self._device_name = device_properties.gcnArchName
return self._device_name
def shapes_symbolic(self) -> tuple[tuple[Any, ...], ...]:
"""
Get the symbolic shapes of all input nodes.
Returns:
A tuple of shape tuples for each input node
"""
return tuple(node.get_size() for node in self._input_nodes)
def shapes_hinted(self) -> tuple[tuple[int, ...], ...]:
"""
Get the size hints for shapes of all input nodes.
Returns:
A tuple of shape tuples with integer hints for each input node
"""
return tuple(
V.graph.sizevars.size_hints(
node.get_size(),
fallback=torch._inductor.config.unbacked_symint_fallback,
)
for node in self._input_nodes
)
def strides_symbolic(self) -> tuple[tuple[sympy.Integer, ...], ...]:
"""
Get the symbolic strides of all input nodes.
Returns:
A tuple of stride tuples for each input node
"""
return tuple(node.get_stride() for node in self._input_nodes)
def strides_hinted(self) -> tuple[tuple[int, ...], ...]:
"""
Get the size hints for strides of all input nodes.
Returns:
A tuple of stride tuples with integer hints for each input node
"""
return tuple(
V.graph.sizevars.size_hints(
node.get_stride(),
fallback=torch._inductor.config.unbacked_symint_fallback,
)
for node in self._input_nodes
)
def dtypes(self) -> tuple[torch.dtype, ...]:
"""
Get the dtypes of all input nodes.
Returns:
A tuple of dtypes for each input node
"""
return tuple(node.get_dtype() for node in self._input_nodes)
def dtype(self, idx: int = 0) -> torch.dtype:
"""
Get the dtype of a specific input node.
Args:
idx: Index of the node to get the dtype from (default: 0)
Returns:
The dtype of the specified input node
"""
return self._input_nodes[idx].get_dtype()
@abstractmethod
def out_dtype(self) -> torch.dtype:
"""
Get the output dtype, whether passed in or inferred from the nodes
Returns:
The output dtype
"""
def get_scalar(self, name: str) -> Union[float, int]:
"""
Get the scalar value for a given name.
Args:
name: Name of the scalar to get
Returns:
The scalar value
"""
assert name in self._scalars, f"Scalar {name} not found, but required"
return self._scalars[name]
@abstractmethod
def output_layout(self, flexible: bool = True) -> Layout:
"""
Abstract method to handle output layout generation.
Args:
out_dtype: Optional output dtype. If not provided, infer from inputs
flexible: If True, return FlexibleLayout, otherwise FixedLayout
"""
| KernelInputs |
python | sqlalchemy__sqlalchemy | test/orm/test_lambdas.py | {
"start": 928,
"end": 11191
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
# we want to test the lambda expiration logic so use backend
# to exercise that
__sparse_driver_backend__ = True
run_setup_mappers = None
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses")
},
)
return User, Address
def test_user_cols_single_lambda(self, plain_fixture):
User, Address = plain_fixture
q = select(lambda: (User.id, User.name)).select_from(lambda: User)
self.assert_compile(q, "SELECT users.id, users.name FROM users")
def test_user_cols_single_lambda_query(self, plain_fixture):
User, Address = plain_fixture
s = fixture_session()
q = s.query(lambda: (User.id, User.name)).select_from(lambda: User)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name FROM users",
)
def test_multiple_entities_single_lambda(self, plain_fixture):
User, Address = plain_fixture
q = select(lambda: (User, Address)).join(lambda: User.addresses)
self.assert_compile(
q,
"SELECT users.id, users.name, addresses.id AS id_1, "
"addresses.user_id, addresses.email_address "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
def test_cols_round_trip(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
# note this does a traversal + _clone of the InstrumentedAttribute
# for the first time ever
def query(names):
stmt = lambda_stmt(
lambda: select(User.name, Address.email_address)
.where(User.name.in_(names))
.join(User.addresses)
) + (lambda s: s.order_by(User.id, Address.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.all(),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.all(),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
for i in range(5):
fn = random.choice([go1, go2])
fn()
@testing.combinations(
(True, True),
(True, False),
(False, False),
argnames="use_aliased,use_indirect_access",
)
def test_entity_round_trip(
self, plain_fixture, use_aliased, use_indirect_access
):
User, Address = plain_fixture
s = Session(testing.db, future=True)
if use_aliased:
if use_indirect_access:
def query(names):
class Foo:
def __init__(self):
self.u1 = aliased(User)
f1 = Foo()
stmt = lambda_stmt(
lambda: select(f1.u1)
.where(f1.u1.name.in_(names))
.options(selectinload(f1.u1.addresses)),
track_on=[f1.u1],
).add_criteria(
lambda s: s.order_by(f1.u1.id), track_on=[f1.u1]
)
return s.execute(stmt)
else:
def query(names):
u1 = aliased(User)
stmt = lambda_stmt(
lambda: select(u1)
.where(u1.name.in_(names))
.options(selectinload(u1.addresses))
) + (lambda s: s.order_by(u1.id))
return s.execute(stmt)
else:
def query(names):
stmt = lambda_stmt(
lambda: select(User)
.where(User.name.in_(names))
.options(selectinload(User.addresses))
) + (lambda s: s.order_by(User.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
self.assert_sql_count(testing.db, fn, 2)
def test_lambdas_rejected_in_options(self, plain_fixture):
User, Address = plain_fixture
assert_raises_message(
exc.ArgumentError,
"ExecutionOption Core or ORM object expected, got",
select(lambda: User).options,
lambda: subqueryload(User.addresses),
)
def test_subqueryload_internal_lambda(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
def query(names):
stmt = (
select(lambda: User)
.where(lambda: User.name.in_(names))
.options(subqueryload(User.addresses))
.order_by(lambda: User.id)
)
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
self.assert_sql_count(testing.db, fn, 2)
def test_subqueryload_external_lambda_caveats(self, plain_fixture):
User, Address = plain_fixture
s = Session(testing.db, future=True)
def query(names):
stmt = lambda_stmt(
lambda: select(User)
.where(User.name.in_(names))
.options(subqueryload(User.addresses))
) + (lambda s: s.order_by(User.id))
return s.execute(stmt)
def go1():
r1 = query(["ed"])
eq_(
r1.scalars().all(),
[User(name="ed", addresses=[Address(), Address(), Address()])],
)
def go2():
r1 = query(["ed", "fred"])
eq_(
r1.scalars().all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
for i in range(5):
fn = random.choice([go1, go2])
with testing.expect_warnings(
'subqueryloader for "User.addresses" must invoke lambda '
r"callable at .*LambdaElement\(<code object <lambda> "
r".*test_lambdas.py.* in order to produce a new query, "
r"decreasing the efficiency of caching"
):
self.assert_sql_count(testing.db, fn, 2)
@testing.combinations(
lambda s, User, Address: s.query(lambda: User).join(lambda: Address),
lambda s, User, Address: s.query(lambda: User).join(
lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
lambda: Address, lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
Address, lambda: User.addresses
),
lambda s, User, Address: s.query(lambda: User).join(
lambda: Address, User.addresses
),
lambda User, Address: select(lambda: User)
.join(lambda: Address)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: Address, lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(Address, lambda: User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
lambda User, Address: select(lambda: User)
.join(lambda: Address, User.addresses)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
argnames="test_case",
)
def test_join_entity_arg(self, plain_fixture, test_case):
User, Address = plain_fixture
s = Session(testing.db, future=True)
stmt = testing.resolve_lambda(test_case, **locals())
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
| LambdaTest |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 359091,
"end": 366931
} | class ____(torch.nn.Module):
def forward(self, x, y, z):
x: "f32[s68, 3]"; y: "f32[s17]"; z: "f32[s68, 3]";
x, y, z, = fx_pytree.tree_flatten_spec(([x, y, z], {}), self._in_spec)
_guards_fn = self._guards_fn(x, y, z); _guards_fn = None
sym_size_int_4: "Sym(s17)" = torch.ops.aten.sym_size.int(y, 0); y = None
sym_size_int_5: "Sym(s68)" = torch.ops.aten.sym_size.int(z, 0)
gt: "Sym(s68 > 5)" = sym_size_int_5 > 5
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, (x, sym_size_int_4, sym_size_int_5, z)); gt = true_graph_0 = false_graph_0 = x = sym_size_int_4 = sym_size_int_5 = z = None
getitem: "f32[s68, 3]" = cond[0]; cond = None
return pytree.tree_unflatten((getitem,), self._out_spec)
class true_graph_0(torch.nn.Module):
def forward(self, x: "f32[s68, 3]", sym_size_int_4: "Sym(s17)", sym_size_int_5: "Sym(s68)", z: "f32[s68, 3]"):
add: "f32[s68, 3]" = torch.ops.aten.add.Tensor(x, sym_size_int_4); x = sym_size_int_4 = None
return (add,)
class false_graph_0(torch.nn.Module):
def forward(self, x: "f32[s68, 3]", sym_size_int_4: "Sym(s17)", sym_size_int_5: "Sym(s68)", z: "f32[s68, 3]"):
mul: "f32[s68, 3]" = torch.ops.aten.mul.Tensor(z, sym_size_int_5); z = sym_size_int_5 = None
add: "f32[s68, 3]" = torch.ops.aten.add.Tensor(x, mul); x = mul = None
return (add,)
""", # noqa: B950
)
# unbacked symint inputs are created during non-strict export,
# which causes a graph break
@unittest.expectedFailure
def test_cond_unbacked_symint_closure(self):
from torch.export import Dim
class M(torch.nn.Module):
def forward(self, x, y, z):
a = y.shape[0]
b = z.shape[0]
# c is an unbacked symint in non-strict export
c = y.sum().item()
def true_fn(x):
return x + a + c
def false_fn(x):
return x + b * z * c
# When exporting with non-strict: a and b are symints,
                # so torch.compile needs to wrap and trace symint inputs.
return torch.cond(x.shape[0] > 5, true_fn, false_fn, (x,))
args = (torch.ones(3, 3), torch.ones(5, dtype=torch.int32), torch.ones(3, 3))
model = M()
dynamic_shapes = {"x": {0: Dim("d")}, "y": {0: Dim("d1")}, "z": {0: Dim("d")}}
_ = self._check_export_ret_graph_str(model, args, dynamic_shapes)
@skipIfTorchDynamo(
"Skip because _merge_output is not intended for dynamo to compile"
)
def test_merge_output(self):
from torch._higher_order_ops.cond import _merge_output
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.symbolic_shapes import ShapeEnv
        # The shapes and strides are from randomly generated pairs of tensors with swapaxes applied
valid_test_cases = [
# [(size1, stride1), (size2, stride2), (expected_stride, expected_size)]
[((3,), (1,)), ((4,), (1,)), ("(u0,)", "(1,)")],
[((1, 3), (3, 1)), ((3, 2), (2, 1)), ("(u0, u1)", "(u1, 1)")],
[((2, 1), (1, 1)), ((7, 3), (3, 1)), ("(u0, u1)", "(u1, 1)")],
[((5, 5), (1, 5)), ((4, 5), (1, 4)), ("(u0, 5)", "(1, u0)")],
[
((7, 3, 1), (1, 7, 1)),
((4, 3, 3), (3, 12, 1)),
("(u0, 3, u1)", "(u1, u0*u1, 1)"),
],
[
((5, 7, 4), (7, 1, 35)),
((7, 4, 4), (4, 1, 28)),
("(u0, u1, 4)", "(u1, 1, u0*u1)"),
],
[
((1, 6, 3, 2), (36, 1, 6, 18)),
((4, 2, 2, 6), (24, 1, 2, 4)),
("(u0, u1, u2, u3)", "(u1*u2*u3, 1, u1, u1*u2)"),
],
[
((6, 1, 6, 3), (18, 1, 1, 6)),
((2, 1, 3, 4), (12, 1, 1, 3)),
("(u0, 1, u1, u2)", "(u1*u2, 1, 1, u1)"),
],
[
((3, 1, 2, 4, 1), (8, 8, 4, 1, 1)),
((2, 4, 1, 4, 1), (16, 4, 4, 1, 1)),
("(u0, u1, u2, 4, 1)", "(4*u1*u2, 4*u2, 4, 1, 1)"),
],
]
def _inner(case):
fake_mode = FakeTensorMode(shape_env=ShapeEnv())
(size1, stride1), (size2, stride2), (merged_size, merged_stride) = case
with fake_mode:
t1 = torch.empty_strided(size1, stride1)
t2 = torch.empty_strided(size2, stride2)
out = _merge_output(t1, t2, fake_mode)
self.assertEqual(str(tuple(out.size())), merged_size)
self.assertEqual(str(tuple(out.stride())), merged_stride)
for case in valid_test_cases:
_inner(case)
        # The shapes and strides are from randomly generated pairs of tensors with swapaxes applied
invalid_test_cases = [
# [(size1, stride1), (size2, stride2)]
[((1,), (1,)), ((1,), (0,))],
[
((1, 3), (1, 1)),
((5, 6), (6, 1)),
], # t1 is not contiguous, t2 is contiguous
[
((2, 1), (1, 1)),
((7, 3), (1, 3)),
], # t1 is contiguous, t2 is not contiguous
[
((5, 4), (4, 1)),
((5, 5), (1, 5)),
], # t1 is contiguous, t2 is not contiguous
[((7, 3, 1), (1, 7, 1)), ((4, 3, 3), (9, 1, 3))], # layout is different
[((5, 7, 4), (7, 1, 35)), ((7, 4, 4), (4, 28, 1))], # layout is different
[
((1, 6, 3, 2), (36, 1, 6, 18)),
((4, 1, 1, 6), (1, 4, 4, 4)),
], # layout is different
[
((6, 1, 6, 3), (18, 1, 1, 6)),
((1, 1, 1, 1), (1, 1, 1, 1)),
], # layout is different
[
((6, 1, 1, 6, 3), (3, 18, 18, 18, 1)),
((5, 1, 2, 1, 1), (2, 10, 1, 10, 1)),
], # layout is different
]
for case in invalid_test_cases:
with self.assertRaisesRegex(Exception, r"."):
_inner(case)
@parametrize("dynamic", [True, False])
@parametrize("backend", ["eager", "aot_eager"])
def test_cond_mismatched_branch_output(self, dynamic, backend):
class M(torch.nn.Module):
def forward(self, x, y, z):
a = y.shape[0]
b = z.shape[0]
def true_fn(x):
# clone the outputs so branches have the same storage_offset
return (x + a)[2:].clone()
def false_fn(x):
# clone the outputs so branches have the same storage_offset
return (x + b * z)[:2].clone()
ret = torch.cond(x.sum() > 0, true_fn, false_fn, (x,))
return y.sum() - ret
m = M()
x, y, z = torch.randn(5, 4), torch.randn(5, 4), torch.randn(5, 4)
out = m(x, y, z)
if not (backend == "eager" and dynamic and not TEST_WITH_CROSSREF):
compiled_out = torch.compile(
m, backend=backend, dynamic=dynamic, fullgraph=True
)(x, y, z)
self.assertEqual(compiled_out, out)
else:
bk = EagerAndRecordGraphs()
compiled_out = torch.compile(
m, backend=bk, dynamic=dynamic, fullgraph=True
)(x, y, z)
self.assertEqual(compiled_out, out)
self.assertExpectedInline(
normalize_gm(bk.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | huggingface__transformers | tests/repo_utils/test_tests_fetcher.py | {
"start": 1598,
"end": 1849
} | class ____:
'''
This is the docstring.
'''
This is the code
"""
BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel
from ...utils import is_torch_available
from .configuration_bert import BertConfig
| BertModel |
python | numba__numba | numba/tests/test_jitclasses.py | {
"start": 1164,
"end": 33198
} | class ____(TestCase, MemoryLeakMixin):
def _check_spec(self, spec=None, test_cls=None, all_expected=None):
if test_cls is None:
@jitclass(spec)
class Test(object):
def __init__(self):
pass
test_cls = Test
clsty = test_cls.class_type.instance_type
names = list(clsty.struct.keys())
values = list(clsty.struct.values())
if all_expected is None:
if isinstance(spec, OrderedDict):
all_expected = spec.items()
else:
all_expected = spec
assert all_expected is not None
self.assertEqual(len(names), len(all_expected))
for got, expected in zip(zip(names, values), all_expected):
self.assertEqual(got[0], expected[0])
self.assertEqual(got[1], expected[1])
def test_ordereddict_spec(self):
spec = OrderedDict()
spec["x"] = int32
spec["y"] = float32
self._check_spec(spec)
def test_list_spec(self):
spec = [("x", int32),
("y", float32)]
self._check_spec(spec)
def test_type_annotations(self):
spec = [("x", int32)]
@jitclass(spec)
class Test1(object):
x: int
y: pt.List[float]
def __init__(self):
pass
self._check_spec(spec, Test1, spec + [("y", types.ListType(float64))])
def test_type_annotation_inheritance(self):
class Foo:
x: int
@jitclass
class Bar(Foo):
y: float
def __init__(self, value: float) -> None:
self.x = int(value)
self.y = value
self._check_spec(
test_cls=Bar, all_expected=[("x", typeof(0)), ("y", typeof(0.0))]
)
def test_spec_errors(self):
spec1 = [("x", int), ("y", float32[:])]
spec2 = [(1, int32), ("y", float32[:])]
class Test(object):
def __init__(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass(Test, spec1)
self.assertIn("spec values should be Numba type instances",
str(raises.exception))
with self.assertRaises(TypeError) as raises:
jitclass(Test, spec2)
self.assertEqual(str(raises.exception),
"spec keys should be strings, got 1")
def test_init_errors(self):
@jitclass([])
class Test:
def __init__(self):
return 7
with self.assertRaises(errors.TypingError) as raises:
Test()
self.assertIn("__init__() should return None, not",
str(raises.exception))
def _make_Float2AndArray(self):
spec = OrderedDict()
spec["x"] = float32
spec["y"] = float32
spec["arr"] = float32[:]
@jitclass(spec)
class Float2AndArray(object):
def __init__(self, x, y, arr):
self.x = x
self.y = y
self.arr = arr
def add(self, val):
self.x += val
self.y += val
return val
return Float2AndArray
def _make_Vector2(self):
spec = OrderedDict()
spec["x"] = int32
spec["y"] = int32
@jitclass(spec)
class Vector2(object):
def __init__(self, x, y):
self.x = x
self.y = y
return Vector2
def test_jit_class_1(self):
Float2AndArray = self._make_Float2AndArray()
Vector2 = self._make_Vector2()
@njit
def bar(obj):
return obj.x + obj.y
@njit
def foo(a):
obj = Float2AndArray(1, 2, a)
obj.add(123)
vec = Vector2(3, 4)
return bar(obj), bar(vec), obj.arr
inp = np.ones(10, dtype=np.float32)
a, b, c = foo(inp)
self.assertEqual(a, 123 + 1 + 123 + 2)
self.assertEqual(b, 3 + 4)
self.assertPreciseEqual(c, inp)
def test_jitclass_usage_from_python(self):
Float2AndArray = self._make_Float2AndArray()
@njit
def identity(obj):
return obj
@njit
def retrieve_attributes(obj):
return obj.x, obj.y, obj.arr
arr = np.arange(10, dtype=np.float32)
obj = Float2AndArray(1, 2, arr)
obj_meminfo = _get_meminfo(obj)
self.assertEqual(obj_meminfo.refcount, 2)
self.assertEqual(obj_meminfo.data, _box.box_get_dataptr(obj))
self.assertEqual(obj._numba_type_.class_type,
Float2AndArray.class_type)
# Use jit class instance in numba
other = identity(obj)
other_meminfo = _get_meminfo(other) # duplicates MemInfo object to obj
self.assertEqual(obj_meminfo.refcount, 4)
self.assertEqual(other_meminfo.refcount, 4)
self.assertEqual(other_meminfo.data, _box.box_get_dataptr(other))
self.assertEqual(other_meminfo.data, obj_meminfo.data)
# Check dtor
del other, other_meminfo
self.assertEqual(obj_meminfo.refcount, 2)
# Check attributes
out_x, out_y, out_arr = retrieve_attributes(obj)
self.assertEqual(out_x, 1)
self.assertEqual(out_y, 2)
self.assertIs(out_arr, arr)
# Access attributes from python
self.assertEqual(obj.x, 1)
self.assertEqual(obj.y, 2)
self.assertIs(obj.arr, arr)
# Access methods from python
self.assertEqual(obj.add(123), 123)
self.assertEqual(obj.x, 1 + 123)
self.assertEqual(obj.y, 2 + 123)
# Setter from python
obj.x = 333
obj.y = 444
obj.arr = newarr = np.arange(5, dtype=np.float32)
self.assertEqual(obj.x, 333)
self.assertEqual(obj.y, 444)
self.assertIs(obj.arr, newarr)
def test_jitclass_datalayout(self):
spec = OrderedDict()
# Boolean has different layout as value vs data
spec["val"] = boolean
@jitclass(spec)
class Foo(object):
def __init__(self, val):
self.val = val
self.assertTrue(Foo(True).val)
self.assertFalse(Foo(False).val)
def test_deferred_type(self):
node_type = deferred_type()
spec = OrderedDict()
spec["data"] = float32
spec["next"] = optional(node_type)
@njit
def get_data(node):
return node.data
@jitclass(spec)
class LinkedNode(object):
def __init__(self, data, next):
self.data = data
self.next = next
def get_next_data(self):
# use deferred type as argument
return get_data(self.next)
def append_to_tail(self, other):
cur = self
while cur.next is not None:
cur = cur.next
cur.next = other
node_type.define(LinkedNode.class_type.instance_type)
first = LinkedNode(123, None)
self.assertEqual(first.data, 123)
self.assertIsNone(first.next)
second = LinkedNode(321, first)
first_meminfo = _get_meminfo(first)
second_meminfo = _get_meminfo(second)
self.assertEqual(first_meminfo.refcount, 3)
self.assertEqual(second.next.data, first.data)
self.assertEqual(first_meminfo.refcount, 3)
self.assertEqual(second_meminfo.refcount, 2)
# Test using deferred type as argument
first_val = second.get_next_data()
self.assertEqual(first_val, first.data)
# Check setattr (issue #2606)
self.assertIsNone(first.next)
second.append_to_tail(LinkedNode(567, None))
self.assertIsNotNone(first.next)
self.assertEqual(first.next.data, 567)
self.assertIsNone(first.next.next)
second.append_to_tail(LinkedNode(678, None))
self.assertIsNotNone(first.next.next)
self.assertEqual(first.next.next.data, 678)
# Check ownership
self.assertEqual(first_meminfo.refcount, 3)
del second, second_meminfo
self.assertEqual(first_meminfo.refcount, 2)
def test_c_structure(self):
spec = OrderedDict()
spec["a"] = int32
spec["b"] = int16
spec["c"] = float64
@jitclass(spec)
class Struct(object):
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
st = Struct(0xabcd, 0xef, 3.1415)
class CStruct(ctypes.Structure):
_fields_ = [
("a", ctypes.c_int32),
("b", ctypes.c_int16),
("c", ctypes.c_double),
]
ptr = ctypes.c_void_p(_box.box_get_dataptr(st))
cstruct = ctypes.cast(ptr, ctypes.POINTER(CStruct))[0]
self.assertEqual(cstruct.a, st.a)
self.assertEqual(cstruct.b, st.b)
self.assertEqual(cstruct.c, st.c)
def test_is(self):
Vector = self._make_Vector2()
vec_a = Vector(1, 2)
@njit
def do_is(a, b):
return a is b
with self.assertRaises(LoweringError) as raises:
# trigger compilation
do_is(vec_a, vec_a)
self.assertIn("no default `is` implementation", str(raises.exception))
def test_isinstance(self):
Vector2 = self._make_Vector2()
vec = Vector2(1, 2)
self.assertIsInstance(vec, Vector2)
def test_subclassing(self):
Vector2 = self._make_Vector2()
with self.assertRaises(TypeError) as raises:
class SubV(Vector2):
pass
self.assertEqual(str(raises.exception),
"cannot subclass from a jitclass")
def test_base_class(self):
class Base(object):
def what(self):
return self.attr
@jitclass([("attr", int32)])
class Test(Base):
def __init__(self, attr):
self.attr = attr
obj = Test(123)
self.assertEqual(obj.what(), 123)
def test_globals(self):
class Mine(object):
constant = 123
def __init__(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass(Mine)
self.assertEqual(str(raises.exception),
"class members are not yet supported: constant")
def test_user_getter_setter(self):
@jitclass([("attr", int32)])
class Foo(object):
def __init__(self, attr):
self.attr = attr
@property
def value(self):
return self.attr + 1
@value.setter
def value(self, val):
self.attr = val - 1
foo = Foo(123)
self.assertEqual(foo.attr, 123)
# Getter
self.assertEqual(foo.value, 123 + 1)
# Setter
foo.value = 789
self.assertEqual(foo.attr, 789 - 1)
self.assertEqual(foo.value, 789)
# Test nopython mode usage of getter and setter
@njit
def bar(foo, val):
a = foo.value
foo.value = val
b = foo.value
c = foo.attr
return a, b, c
a, b, c = bar(foo, 567)
self.assertEqual(a, 789)
self.assertEqual(b, 567)
self.assertEqual(c, 567 - 1)
def test_user_deleter_error(self):
class Foo(object):
def __init__(self):
pass
@property
def value(self):
return 1
@value.deleter
def value(self):
pass
with self.assertRaises(TypeError) as raises:
jitclass(Foo)
self.assertEqual(str(raises.exception),
"deleter is not supported: value")
def test_name_shadowing_error(self):
class Foo(object):
def __init__(self):
pass
@property
def my_property(self):
pass
def my_method(self):
pass
with self.assertRaises(NameError) as raises:
jitclass(Foo, [("my_property", int32)])
self.assertEqual(str(raises.exception), "name shadowing: my_property")
with self.assertRaises(NameError) as raises:
jitclass(Foo, [("my_method", int32)])
self.assertEqual(str(raises.exception), "name shadowing: my_method")
def test_distinct_classes(self):
# Different classes with the same names shouldn't confuse the compiler
@jitclass([("x", int32)])
class Foo(object):
def __init__(self, x):
self.x = x + 2
def run(self):
return self.x + 1
FirstFoo = Foo
@jitclass([("x", int32)])
class Foo(object):
def __init__(self, x):
self.x = x - 2
def run(self):
return self.x - 1
SecondFoo = Foo
foo = FirstFoo(5)
self.assertEqual(foo.x, 7)
self.assertEqual(foo.run(), 8)
foo = SecondFoo(5)
self.assertEqual(foo.x, 3)
self.assertEqual(foo.run(), 2)
def test_parameterized(self):
class MyClass(object):
def __init__(self, value):
self.value = value
def create_my_class(value):
cls = jitclass(MyClass, [("value", typeof(value))])
return cls(value)
a = create_my_class(123)
self.assertEqual(a.value, 123)
b = create_my_class(12.3)
self.assertEqual(b.value, 12.3)
c = create_my_class(np.array([123]))
np.testing.assert_equal(c.value, [123])
d = create_my_class(np.array([12.3]))
np.testing.assert_equal(d.value, [12.3])
def test_protected_attrs(self):
spec = {
"value": int32,
"_value": float32,
"__value": int32,
"__value__": int32,
}
@jitclass(spec)
class MyClass(object):
def __init__(self, value):
self.value = value
self._value = value / 2
self.__value = value * 2
self.__value__ = value - 1
@property
def private_value(self):
return self.__value
@property
def _inner_value(self):
return self._value
@_inner_value.setter
def _inner_value(self, v):
self._value = v
@property
def __private_value(self):
return self.__value
@__private_value.setter
def __private_value(self, v):
self.__value = v
def swap_private_value(self, new):
old = self.__private_value
self.__private_value = new
return old
def _protected_method(self, factor):
return self._value * factor
def __private_method(self, factor):
return self.__value * factor
def check_private_method(self, factor):
return self.__private_method(factor)
value = 123
inst = MyClass(value)
# test attributes
self.assertEqual(inst.value, value)
self.assertEqual(inst._value, value / 2)
self.assertEqual(inst.private_value, value * 2)
# test properties
self.assertEqual(inst._inner_value, inst._value)
freeze_inst_value = inst._value
inst._inner_value -= 1
self.assertEqual(inst._inner_value, freeze_inst_value - 1)
self.assertEqual(inst.swap_private_value(321), value * 2)
self.assertEqual(inst.swap_private_value(value * 2), 321)
# test methods
self.assertEqual(inst._protected_method(3), inst._value * 3)
self.assertEqual(inst.check_private_method(3), inst.private_value * 3)
# test special
self.assertEqual(inst.__value__, value - 1)
inst.__value__ -= 100
self.assertEqual(inst.__value__, value - 101)
# test errors
@njit
def access_dunder(inst):
return inst.__value
with self.assertRaises(errors.TypingError) as raises:
access_dunder(inst)
# It will appear as "_TestJitClass__value" because the `access_dunder`
# is under the scope of "TestJitClass".
self.assertIn("_TestJitClass__value", str(raises.exception))
with self.assertRaises(AttributeError) as raises:
access_dunder.py_func(inst)
self.assertIn("_TestJitClass__value", str(raises.exception))
@skip_if_typeguard
def test_annotations(self):
"""
Methods with annotations should compile fine (issue #1911).
"""
from .annotation_usecases import AnnotatedClass
spec = {"x": int32}
cls = jitclass(AnnotatedClass, spec)
obj = cls(5)
self.assertEqual(obj.x, 5)
self.assertEqual(obj.add(2), 7)
def test_docstring(self):
@jitclass
class Apple(object):
"Class docstring"
def __init__(self):
"init docstring"
def foo(self):
"foo method docstring"
@property
def aval(self):
"aval property docstring"
self.assertEqual(Apple.__doc__, "Class docstring")
self.assertEqual(Apple.__init__.__doc__, "init docstring")
self.assertEqual(Apple.foo.__doc__, "foo method docstring")
self.assertEqual(Apple.aval.__doc__, "aval property docstring")
def test_kwargs(self):
spec = [("a", int32),
("b", float64)]
@jitclass(spec)
class TestClass(object):
def __init__(self, x, y, z):
self.a = x * y
self.b = z
x = 2
y = 2
z = 1.1
kwargs = {"y": y, "z": z}
tc = TestClass(x=2, **kwargs)
self.assertEqual(tc.a, x * y)
self.assertEqual(tc.b, z)
def test_default_args(self):
spec = [("x", int32),
("y", int32),
("z", int32)]
@jitclass(spec)
class TestClass(object):
def __init__(self, x, y, z=1):
self.x = x
self.y = y
self.z = z
tc = TestClass(1, 2, 3)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 3)
tc = TestClass(1, 2)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 1)
tc = TestClass(y=2, z=5, x=1)
self.assertEqual(tc.x, 1)
self.assertEqual(tc.y, 2)
self.assertEqual(tc.z, 5)
def test_default_args_keyonly(self):
spec = [("x", int32),
("y", int32),
("z", int32),
("a", int32)]
TestClass = jitclass(TestClass1, spec)
tc = TestClass(2, 3)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 3)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 5)
tc = TestClass(y=4, x=2, a=42, z=100)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 100)
self.assertEqual(tc.a, 42)
tc = TestClass(y=4, x=2, a=42)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 42)
tc = TestClass(y=4, x=2)
self.assertEqual(tc.x, 2)
self.assertEqual(tc.y, 4)
self.assertEqual(tc.z, 1)
self.assertEqual(tc.a, 5)
def test_default_args_starargs_and_keyonly(self):
spec = [("x", int32),
("y", int32),
("z", int32),
("args", types.UniTuple(int32, 2)),
("a", int32)]
with self.assertRaises(errors.UnsupportedError) as raises:
jitclass(TestClass2, spec)
msg = "VAR_POSITIONAL argument type unsupported"
self.assertIn(msg, str(raises.exception))
def test_generator_method(self):
spec = []
@jitclass(spec)
class TestClass(object):
def __init__(self):
pass
def gen(self, niter):
for i in range(niter):
yield np.arange(i)
def expected_gen(niter):
for i in range(niter):
yield np.arange(i)
for niter in range(10):
for expect, got in zip(expected_gen(niter), TestClass().gen(niter)):
self.assertPreciseEqual(expect, got)
def test_getitem(self):
spec = [("data", int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, key, data):
self.data[key] = data
def __getitem__(self, key):
return self.data[key]
@njit
def create_and_set_indices():
t = TestClass()
t[1] = 1
t[2] = 2
t[3] = 3
return t
@njit
def get_index(t, n):
return t[n]
t = create_and_set_indices()
self.assertEqual(get_index(t, 1), 1)
self.assertEqual(get_index(t, 2), 2)
self.assertEqual(get_index(t, 3), 3)
def test_getitem_unbox(self):
spec = [("data", int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, key, data):
self.data[key] = data
def __getitem__(self, key):
return self.data[key]
t = TestClass()
t[1] = 10
@njit
def set2return1(t):
t[2] = 20
return t[1]
t_1 = set2return1(t)
self.assertEqual(t_1, 10)
self.assertEqual(t[2], 20)
def test_getitem_complex_key(self):
spec = [("data", int32[:, :])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros((10, 10), dtype=np.int32)
def __setitem__(self, key, data):
self.data[int(key.real), int(key.imag)] = data
def __getitem__(self, key):
return self.data[int(key.real), int(key.imag)]
t = TestClass()
t[complex(1, 1)] = 3
@njit
def get_key(t, real, imag):
return t[complex(real, imag)]
@njit
def set_key(t, real, imag, data):
t[complex(real, imag)] = data
self.assertEqual(get_key(t, 1, 1), 3)
set_key(t, 2, 2, 4)
self.assertEqual(t[complex(2, 2)], 4)
def test_getitem_tuple_key(self):
spec = [("data", int32[:, :])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros((10, 10), dtype=np.int32)
def __setitem__(self, key, data):
self.data[key[0], key[1]] = data
def __getitem__(self, key):
return self.data[key[0], key[1]]
t = TestClass()
t[1, 1] = 11
@njit
def get11(t):
return t[1, 1]
@njit
def set22(t, data):
t[2, 2] = data
self.assertEqual(get11(t), 11)
set22(t, 22)
self.assertEqual(t[2, 2], 22)
def test_getitem_slice_key(self):
spec = [("data", int32[:])]
@jitclass(spec)
class TestClass(object):
def __init__(self):
self.data = np.zeros(10, dtype=np.int32)
def __setitem__(self, slc, data):
self.data[slc.start] = data
self.data[slc.stop] = data + slc.step
def __getitem__(self, slc):
return self.data[slc.start]
t = TestClass()
# set t.data[1] = 1 and t.data[5] = 2
t[1:5:1] = 1
self.assertEqual(t[1:1:1], 1)
self.assertEqual(t[5:5:5], 2)
@njit
def get5(t):
return t[5:6:1]
self.assertEqual(get5(t), 2)
# sets t.data[2] = data, and t.data[6] = data + 1
@njit
def set26(t, data):
t[2:6:1] = data
set26(t, 2)
self.assertEqual(t[2:2:1], 2)
self.assertEqual(t[6:6:1], 3)
def test_jitclass_longlabel_not_truncated(self):
# See issue #3872, llvm 7 introduced a max label length of 1024 chars
# Numba ships patched llvm 7.1 (ppc64le) and patched llvm 8 to undo this
        # change; this test is here to make sure long labels are ok:
alphabet = [chr(ord("a") + x) for x in range(26)]
spec = [(letter * 10, float64) for letter in alphabet]
spec.extend([(letter.upper() * 10, float64) for letter in alphabet])
@jitclass(spec)
class TruncatedLabel(object):
def __init__(self,):
self.aaaaaaaaaa = 10.
def meth1(self):
self.bbbbbbbbbb = random.gauss(self.aaaaaaaaaa, self.aaaaaaaaaa)
def meth2(self):
self.meth1()
# unpatched LLVMs will raise here...
TruncatedLabel().meth2()
def test_pickling(self):
@jitclass
class PickleTestSubject(object):
def __init__(self):
pass
inst = PickleTestSubject()
ty = typeof(inst)
self.assertIsInstance(ty, types.ClassInstanceType)
pickled = pickle.dumps(ty)
self.assertIs(pickle.loads(pickled), ty)
def test_static_methods(self):
@jitclass([("x", int32)])
class Test1:
def __init__(self, x):
self.x = x
def increase(self, y):
self.x = self.add(self.x, y)
return self.x
@staticmethod
def add(a, b):
return a + b
@staticmethod
def sub(a, b):
return a - b
@jitclass([("x", int32)])
class Test2:
def __init__(self, x):
self.x = x
def increase(self, y):
self.x = self.add(self.x, y)
return self.x
@staticmethod
def add(a, b):
return a - b
self.assertIsInstance(Test1.add, Dispatcher)
self.assertIsInstance(Test1.sub, Dispatcher)
self.assertIsInstance(Test2.add, Dispatcher)
self.assertNotEqual(Test1.add, Test2.add)
self.assertEqual(3, Test1.add(1, 2))
self.assertEqual(-1, Test2.add(1, 2))
self.assertEqual(4, Test1.sub(6, 2))
t1 = Test1(0)
t2 = Test2(0)
self.assertEqual(1, t1.increase(1))
self.assertEqual(-1, t2.increase(1))
self.assertEqual(2, t1.add(1, 1))
self.assertEqual(0, t1.sub(1, 1))
self.assertEqual(0, t2.add(1, 1))
self.assertEqual(2j, t1.add(1j, 1j))
self.assertEqual(1j, t1.sub(2j, 1j))
self.assertEqual("foobar", t1.add("foo", "bar"))
with self.assertRaises(AttributeError) as raises:
Test2.sub(3, 1)
self.assertIn("has no attribute 'sub'",
str(raises.exception))
with self.assertRaises(TypeError) as raises:
Test1.add(3)
self.assertIn("not enough arguments: expected 2, got 1",
str(raises.exception))
# Check error message for calling a static method as a class attr from
# another method (currently unsupported).
@jitclass([])
class Test3:
def __init__(self):
pass
@staticmethod
def a_static_method(a, b):
pass
def call_static(self):
return Test3.a_static_method(1, 2)
invalid = Test3()
with self.assertRaises(errors.TypingError) as raises:
invalid.call_static()
self.assertIn("Unknown attribute 'a_static_method'",
str(raises.exception))
def test_jitclass_decorator_usecases(self):
spec = OrderedDict(x=float64)
@jitclass()
class Test1:
x: float
def __init__(self):
self.x = 0
self.assertIsInstance(Test1, JitClassType)
self.assertDictEqual(Test1.class_type.struct, spec)
@jitclass(spec=spec)
class Test2:
def __init__(self):
self.x = 0
self.assertIsInstance(Test2, JitClassType)
self.assertDictEqual(Test2.class_type.struct, spec)
@jitclass
class Test3:
x: float
def __init__(self):
self.x = 0
self.assertIsInstance(Test3, JitClassType)
self.assertDictEqual(Test3.class_type.struct, spec)
@jitclass(spec)
class Test4:
def __init__(self):
self.x = 0
self.assertIsInstance(Test4, JitClassType)
self.assertDictEqual(Test4.class_type.struct, spec)
def test_jitclass_function_usecases(self):
spec = OrderedDict(x=float64)
class AnnotatedTest:
x: float
def __init__(self):
self.x = 0
JitTest1 = jitclass(AnnotatedTest)
self.assertIsInstance(JitTest1, JitClassType)
self.assertDictEqual(JitTest1.class_type.struct, spec)
class UnannotatedTest:
def __init__(self):
self.x = 0
JitTest2 = jitclass(UnannotatedTest, spec)
self.assertIsInstance(JitTest2, JitClassType)
self.assertDictEqual(JitTest2.class_type.struct, spec)
def test_jitclass_isinstance(self):
spec = OrderedDict(value=int32)
@jitclass(spec)
class Foo(object):
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
def getValueIncr(self):
return self.value + 1
@jitclass(spec)
class Bar(object):
def __init__(self, value):
self.value = value
def getValue(self):
return self.value
def test_jitclass_isinstance(obj):
if isinstance(obj, (Foo, Bar)):
# call something that both classes implements
x = obj.getValue()
if isinstance(obj, Foo): # something that only Foo implements
return obj.getValueIncr() + x, 'Foo'
else:
return obj.getValue() + x, 'Bar'
else:
return 'no match'
pyfunc = test_jitclass_isinstance
cfunc = njit(test_jitclass_isinstance)
self.assertIsInstance(Foo, JitClassType)
self.assertEqual(pyfunc(Foo(3)), cfunc(Foo(3)))
self.assertEqual(pyfunc(Bar(123)), cfunc(Bar(123)))
self.assertEqual(pyfunc(0), cfunc(0))
def test_jitclass_unsupported_dunder(self):
with self.assertRaises(TypeError) as e:
@jitclass
class Foo(object):
def __init__(self):
return
def __enter__(self):
return None
Foo()
self.assertIn("Method '__enter__' is not supported.", str(e.exception))
def test_modulename(self):
@jitclass
class TestModname(object):
def __init__(self):
self.x = 12
thisModule = __name__
classModule = TestModname.__module__
self.assertEqual(thisModule, classModule)
| TestJitClass |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-couchbase/tests/test_kvstore_couchbase.py | {
"start": 1761,
"end": 5149
} | class ____:
@classmethod
def setup_class(self) -> None:
self.cluster = get_cluster()
self.kvstore = CouchbaseKVStore.from_couchbase_client(
self.cluster,
BUCKET_NAME,
SCOPE_NAME,
)
def test_add_key_value_pair(self):
"""Test adding a key-value pair to the store."""
key = "key1"
value = {"doc": "value1", "status": "active"}
self.kvstore.put(key, value)
doc = self.kvstore.get(key)
assert doc == value
def test_add_key_value_pairs(self):
"""Test adding multiple key-value pairs to the store."""
key1 = "key1"
value1 = {"doc": "value1", "status": "active"}
key2 = "key2"
value2 = {"doc": "value2", "status": "inactive"}
key_value_pairs = [
(key1, value1),
(key2, value2),
]
self.kvstore.put_all(key_value_pairs)
doc1 = self.kvstore.get(key1)
doc2 = self.kvstore.get(key2)
assert doc1 == value1
assert doc2 == value2
def test_delete_key_value_pair(self):
"""Test deleting a key-value pair from the store."""
key = "key1"
value = {"doc": "value1", "status": "active"}
self.kvstore.put(key, value)
doc = self.kvstore.get(key)
assert doc == value
is_deleted = self.kvstore.delete(key)
assert is_deleted
doc = self.kvstore.get(key)
assert doc is None
def test_get_all_key_value_pairs(self):
"""Test getting all key-value pairs from the store."""
key1 = "key1"
value1 = {"doc": "value1", "status": "active"}
key2 = "key2"
value2 = {"doc": "value2", "status": "inactive"}
key_value_pairs = [
(key1, value1),
(key2, value2),
]
self.kvstore.put_all(key_value_pairs, batch_size=2)
docs = self.kvstore.get_all()
assert len(docs) == 2
assert key1 in docs
assert key2 in docs
def test_delete_multiple_key_value_pairs(self):
"""Test deleting multiple key-value pairs from the store."""
key1 = "key1"
value1 = {"doc": "value1", "status": "active"}
key2 = "key2"
value2 = {"doc": "value2", "status": "inactive"}
key_value_pairs = [
(key1, value1),
(key2, value2),
]
self.kvstore.put_all(key_value_pairs, batch_size=2)
docs = self.kvstore.get_all()
assert len(docs) == 2
is_deleted = self.kvstore.delete(key1)
assert is_deleted
doc1 = self.kvstore.get(key1)
assert doc1 is None
is_deleted = self.kvstore.delete(key2)
assert is_deleted
doc2 = self.kvstore.get(key2)
assert doc2 is None
def test_non_default_collection(self):
"""Test adding a key-value pair to a non-default collection."""
key = "key1"
value = {"doc": "value1", "status": "active"}
collection = "test_collection"
self.kvstore.put(key, value, collection=collection)
doc = self.kvstore.get(key, collection=collection)
assert doc == value
is_deleted = self.kvstore.delete(key, collection=collection)
assert is_deleted
doc = self.kvstore.get(key, collection=collection)
assert doc is None
| TestCouchbaseKVStore |
python | ipython__ipython | tests/test_completer.py | {
"start": 6746,
"end": 6954
} | class ____:
instances = {}
def __init__(self, name):
self.instances[name] = self
@classmethod
def _ipython_key_completions_(cls):
return cls.instances.keys()
| NamedInstanceClass |
python | jina-ai__jina | jina/logging/profile.py | {
"start": 7126,
"end": 8842
} | class ____:
"""Timing a code snippet with a context manager."""
time_attrs = ['years', 'months', 'days', 'hours', 'minutes', 'seconds']
def __init__(self, task_name: str, logger: 'JinaLogger' = None):
"""
Create the context manager to timing a code snippet.
:param task_name: The context/message.
:param logger: Use existing logger or use naive :func:`print`.
Example:
.. highlight:: python
.. code-block:: python
with TimeContext('loop'):
do_busy()
"""
self.task_name = task_name
self._logger = logger
self.duration = 0
def __enter__(self):
self.start = time.perf_counter()
self._enter_msg()
return self
def _enter_msg(self):
if self._logger:
self._logger.info(self.task_name + '...')
else:
print(self.task_name, end=' ...\t', flush=True)
def __exit__(self, typ, value, traceback):
self.duration = self.now()
self.readable_duration = get_readable_time(seconds=self.duration)
self._exit_msg()
def now(self) -> float:
"""
Get the passed time from start to now.
:return: passed time
"""
return time.perf_counter() - self.start
def _exit_msg(self):
if self._logger:
self._logger.info(
f'{self.task_name} takes {self.readable_duration} ({self.duration:.2f}s)'
)
else:
print(
colored(
f'{self.task_name} takes {self.readable_duration} ({self.duration:.2f}s)'
),
flush=True,
)
| TimeContext |
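A minimal usage sketch for the TimeContext row above; the import path follows the row's file path (jina/logging/profile.py) and the workload is a placeholder:

from jina.logging.profile import TimeContext

with TimeContext('building index') as timer:    # prints "building index ..." on entry
    total = sum(range(1_000_000))               # stand-in for real work
print(timer.duration, timer.readable_duration)  # both populated by __exit__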
python | mlflow__mlflow | tests/store/artifact/test_http_artifact_repo.py | {
"start": 1348,
"end": 1560
} | class ____(MockResponse):
def iter_content(self, chunk_size):
yield self.data.encode("utf-8")
def __enter__(self):
return self
def __exit__(self, *exc):
pass
| MockStreamResponse |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 5274,
"end": 6263
} | class ____(ActionBaseModel):
schedule: Optional[SCHEDULE_TYPES] = Field(
default=None, description="The schedule for the deployment."
)
active: Optional[bool] = Field(
default=None, description="Whether or not the schedule is active."
)
max_scheduled_runs: Optional[PositiveInteger] = Field(
default=None,
description="The maximum number of scheduled runs for the schedule.",
)
parameters: Optional[dict[str, Any]] = Field(
default=None,
description="Parameter overrides for the schedule.",
)
slug: Optional[str] = Field(
default=None,
description="A unique identifier for the schedule.",
)
@field_validator("max_scheduled_runs")
@classmethod
def validate_max_scheduled_runs(cls, v: Optional[int]) -> Optional[int]:
return validate_schedule_max_scheduled_runs(
v, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value()
)
| DeploymentScheduleUpdate |
python | walkccc__LeetCode | solutions/515. Find Largest Value in Each Tree Row/515.py | {
"start": 0,
"end": 426
} | class ____:
def largestValues(self, root: TreeNode | None) -> list[int]:
if not root:
return []
ans = []
q = collections.deque([root])
while q:
mx = -math.inf
for _ in range(len(q)):
root = q.popleft()
mx = max(mx, root.val)
if root.left:
q.append(root.left)
if root.right:
q.append(root.right)
ans.append(mx)
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximum-students-on-a-single-bench.py | {
"start": 78,
"end": 408
} | class ____(object):
def maxStudentsOnBench(self, students):
"""
:type students: List[List[int]]
:rtype: int
"""
lookup = collections.defaultdict(set)
for s, b in students:
lookup[b].add(s)
return max(len(x) for x in lookup.itervalues()) if lookup else 0
| Solution |
python | django__django | tests/admin_widgets/models.py | {
"start": 4707,
"end": 5045
} | class ____(models.Model):
"""
A model with a m2m to a model that won't be registered with the admin
(Company) so the corresponding raw ID widget won't have a magnifying
glass link to select related company instances.
"""
name = models.CharField(max_length=20)
companies = models.ManyToManyField(Company)
| Advisor |
python | PyCQA__pylint | tests/regrtest_data/max_inferable_limit_for_classes/nodes/roles.py | {
"start": 389,
"end": 431
} | class ____(ColumnListRole):
...
| ByOfRole |
python | doocs__leetcode | solution/2100-2199/2116.Check if a Parentheses String Can Be Valid/Solution.py | {
"start": 0,
"end": 569
} | class ____:
def canBeValid(self, s: str, locked: str) -> bool:
n = len(s)
if n & 1:
return False
x = 0
for i in range(n):
if s[i] == '(' or locked[i] == '0':
x += 1
elif x:
x -= 1
else:
return False
x = 0
for i in range(n - 1, -1, -1):
if s[i] == ')' or locked[i] == '0':
x += 1
elif x:
x -= 1
else:
return False
return True
| Solution |
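A quick sanity check for the canBeValid solution above; the driver is illustrative only and assumes the Solution class from this row is in scope:

sol = Solution()
print(sol.canBeValid("))((", "0000"))  # True: every position is unlocked and can be flipped
print(sol.canBeValid(")(", "11"))      # False: both characters are locked in an invalid order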
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 9741,
"end": 9971
} | class ____:
"""Table 8.2 of the PDF 1.7 reference."""
XYZ = "/XYZ"
FIT = "/Fit"
FIT_H = "/FitH"
FIT_V = "/FitV"
FIT_R = "/FitR"
FIT_B = "/FitB"
FIT_BH = "/FitBH"
FIT_BV = "/FitBV"
| TypFitArguments |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 48654,
"end": 51471
} | class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.layers = nn.ModuleList(
[EdgeTamVideoMemoryAttentionLayer(config) for _ in range(config.memory_attention_num_layers)]
)
self.layer_norm = nn.LayerNorm(config.memory_attention_hidden_size)
self.rotary_emb = EdgeTamVideoVisionRotaryEmbedding(config=config)
self.rotary_emb_k = EdgeTamVideoVisionRotaryEmbedding(
config, end_x=config.memory_attention_rope_k_sizes[0], end_y=config.memory_attention_rope_k_sizes[1]
)
def forward(
self,
current_vision_features: torch.Tensor,
memory: torch.Tensor,
current_vision_position_embeddings: Optional[Tensor] = None,
memory_posision_embeddings: Optional[Tensor] = None,
num_object_pointer_tokens: int = 0,
num_spatial_memory_tokens: int = -1,
):
"""
Args:
current_vision_features (`torch.FloatTensor`):
The current vision features used for self-attention.
memory (`torch.FloatTensor`):
The memory features used for cross-attention.
current_vision_position_embeddings (`torch.FloatTensor`, *optional*):
The position embeddings for the current vision features.
memory_posision_embeddings (`torch.FloatTensor`, *optional*):
The position embeddings for the memory features.
num_object_pointer_tokens (`int`, *optional*, defaults to 0):
The number of object pointer tokens.
"""
output = current_vision_features
if current_vision_position_embeddings is not None:
output = output + 0.1 * current_vision_position_embeddings
# Convert to batch first
output = output.transpose(0, 1)
memory = memory.transpose(0, 1).unsqueeze(1)
memory_posision_embeddings = memory_posision_embeddings.transpose(0, 1).unsqueeze(1)
rope_position_embeddings = self.rotary_emb()
rope_position_embeddings_k = self.rotary_emb_k()
for layer in self.layers:
output = layer(
queries=output.unsqueeze(1) if output.ndim == 3 else output,
keys=memory,
key_point_embedding=memory_posision_embeddings,
rope_position_embeddings=rope_position_embeddings,
rope_position_embeddings_k=rope_position_embeddings_k,
num_k_exclude_rope=num_object_pointer_tokens,
rope_k_repeat=num_spatial_memory_tokens,
)
normed_output = self.layer_norm(output)
# Convert back to seq first
normed_output = normed_output.transpose(0, 1)
return normed_output
| EdgeTamVideoMemoryAttention |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 19886,
"end": 21321
} | class ____(ExternalRenderer):
"""
Renderer to display interactive figures in an external web browser.
This renderer will open a new browser window or tab when the
plotly.io.show function is called on a figure.
This renderer has no ipython/jupyter dependencies and is a good choice
for use in environments that do not support the inline display of
interactive figures.
mime type: 'text/html'
"""
def __init__(
self,
config=None,
auto_play=False,
using=None,
new=0,
autoraise=True,
post_script=None,
animation_opts=None,
):
self.config = config
self.auto_play = auto_play
self.using = using
self.new = new
self.autoraise = autoraise
self.post_script = post_script
self.animation_opts = animation_opts
def render(self, fig_dict):
from plotly.io import to_html
html = to_html(
fig_dict,
config=self.config,
auto_play=self.auto_play,
include_plotlyjs=True,
include_mathjax="cdn",
post_script=self.post_script,
full_html=True,
animation_opts=self.animation_opts,
default_width="100%",
default_height="100%",
validate=False,
)
open_html_in_browser(html, self.using, self.new, self.autoraise)
| BrowserRenderer |
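As a usage note for the BrowserRenderer row above, plotly registers this renderer under the name "browser"; a small sketch assuming a standard plotly install:

import plotly.graph_objects as go
import plotly.io as pio

pio.renderers.default = "browser"             # route fig.show() through the browser renderer
fig = go.Figure(data=go.Scatter(y=[1, 3, 2]))
fig.show()                                    # opens the figure in a new browser tab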
python | pypa__pip | src/pip/_vendor/pygments/filters/__init__.py | {
"start": 33990,
"end": 34881
} | class ____(Filter):
"""Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
| RaiseOnErrorTokenFilter |
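A hedged sketch of how the RaiseOnErrorTokenFilter above is typically attached to a lexer; it assumes the standalone pygments package rather than pip's vendored copy:

from pygments.lexers import PythonLexer
from pygments.filters import RaiseOnErrorTokenFilter

lexer = PythonLexer()
lexer.add_filter(RaiseOnErrorTokenFilter())
tokens = list(lexer.get_tokens("x = 1"))  # raises ErrorToken only if the lexer emits an Error token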
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/workflow_engine_data_condition.py | {
"start": 1144,
"end": 6021
} | class ____(Serializer):
def get_attrs(
self,
item_list: Sequence[DataCondition],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> defaultdict[DataCondition, dict[str, list[str]]]:
detector_triggers = {item.id: item for item in item_list}
detector_trigger_ids = [dc.id for dc in item_list]
# below, we go from detector trigger to action filter
detector_ids = Subquery(
Detector.objects.filter(
workflow_condition_group__in=[
detector_trigger.condition_group
for detector_trigger in detector_triggers.values()
]
).values_list("id", flat=True)
)
workflow_dcg_ids = DataConditionGroup.objects.filter(
workflowdataconditiongroup__workflow__in=Subquery(
DetectorWorkflow.objects.filter(detector__in=detector_ids).values_list(
"workflow_id", flat=True
)
)
).values_list("id", flat=True)
action_filter_data_condition_groups = DataCondition.objects.filter(
comparison__in=[item.condition_result for item in item_list],
condition_group__in=Subquery(workflow_dcg_ids),
).values_list("condition_group", flat=True)
action_filter_data_condition_group_action_ids = DataConditionGroupAction.objects.filter(
condition_group_id__in=Subquery(action_filter_data_condition_groups)
).values_list("action_id", flat=True)
actions = Action.objects.filter(
id__in=Subquery(action_filter_data_condition_group_action_ids)
).order_by("id")
serialized_actions = serialize(
list(actions), user, WorkflowEngineActionSerializer(), **kwargs
)
result: DefaultDict[DataCondition, dict[str, list[str]]] = defaultdict(dict)
for data_condition in detector_triggers:
result[detector_triggers[data_condition]]["actions"] = []
for action in serialized_actions:
# in practice we only ever have 1 data condition in the item list at a time, but we may have multiple actions
result[detector_triggers[detector_trigger_ids[0]]]["actions"].append(action)
return result
def serialize(
self,
obj: DataCondition,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> dict[str, Any]:
# XXX: we are assuming that the obj/DataCondition is a detector trigger
detector = Detector.objects.get(workflow_condition_group=obj.condition_group)
try:
alert_rule_trigger_id = DataConditionAlertRuleTrigger.objects.values_list(
"alert_rule_trigger_id", flat=True
).get(data_condition=obj)
except DataConditionAlertRuleTrigger.DoesNotExist:
# this data condition does not have an analog in the old system,
# but we need to return *something*
alert_rule_trigger_id = get_fake_id_from_object_id(obj.id)
try:
alert_rule_id = AlertRuleDetector.objects.values_list("alert_rule_id", flat=True).get(
detector=detector.id
)
except AlertRuleDetector.DoesNotExist:
# this detector does not have an analog in the old system
alert_rule_id = get_fake_id_from_object_id(detector.id)
if obj.type == Condition.ANOMALY_DETECTION:
threshold_type = obj.comparison["threshold_type"]
resolve_threshold = None
else:
threshold_type = (
AlertRuleThresholdType.ABOVE.value
if obj.type == Condition.GREATER
else AlertRuleThresholdType.BELOW.value
)
resolve_threshold = translate_data_condition_type(
detector.config.get("comparison_delta"),
obj.type,
get_resolve_threshold(obj.condition_group),
)
return {
"id": str(alert_rule_trigger_id),
"alertRuleId": str(alert_rule_id),
"label": (
"critical" if obj.condition_result == DetectorPriorityLevel.HIGH else "warning"
),
"thresholdType": threshold_type,
"alertThreshold": translate_data_condition_type(
detector.config.get("comparison_delta"),
obj.type,
(
0 if obj.type == Condition.ANOMALY_DETECTION else obj.comparison
), # to replicate existing behavior, where anomaly detection triggers have a threshold of 0
),
"resolveThreshold": resolve_threshold,
"dateCreated": obj.date_added,
"actions": attrs.get("actions", []),
}
| WorkflowEngineDataConditionSerializer |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 1429,
"end": 1666
} | class ____:
def __init__(self):
pass
def __private(self):
pass
def __private2_(self):
pass
def ___private3(self):
pass
def method(self, param):
raise NotImplementedError
| Super |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 2739,
"end": 2888
} | class ____:
@classmethod
def f(x):
#? 16 text {'new_name': 'ab'}
return 25
# ++++++++++++++++++++++++++++++++++++++++++++++++++
| X |
python | doocs__leetcode | solution/0400-0499/0492.Construct the Rectangle/Solution.py | {
"start": 0,
"end": 180
} | class ____:
def constructRectangle(self, area: int) -> List[int]:
w = int(sqrt(area))
while area % w != 0:
w -= 1
return [area // w, w]
| Solution |
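A small driver for the constructRectangle solution above; it assumes the usual LeetCode imports (from math import sqrt, from typing import List) are in scope for the class:

sol = Solution()
print(sol.constructRectangle(4))   # [2, 2]
print(sol.constructRectangle(37))  # [37, 1]: a prime area degenerates to a single row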
python | django__django | tests/urlpatterns_reverse/tests.py | {
"start": 13328,
"end": 14054
} | class ____(SimpleTestCase):
def test_no_urls_exception(self):
"""
URLResolver should raise an exception when no urlpatterns exist.
"""
resolver = URLResolver(RegexPattern(r"^$"), settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see the 'urlpatterns' "
"variable with valid patterns in the file then the issue is "
"probably caused by a circular import.",
):
getattr(resolver, "url_patterns")
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
| NoURLPatternsTests |
python | pytorch__pytorch | torch/_inductor/runtime/triton_heuristics.py | {
"start": 57644,
"end": 57793
} | class ____:
def __init__(self, value: str):
self.value = value
def __call__(self, _=None) -> str:
return self.value
| _ConstRepr |
python | explosion__spaCy | spacy/schemas.py | {
"start": 9783,
"end": 10243
} | class ____(ConstrainedStr):
regex = re.compile(r"^({\d+}|{\d+,\d*}|{\d*,\d+})$")
TokenPatternOperator = Union[TokenPatternOperatorSimple, TokenPatternOperatorMinMax]
StringValue = Union[TokenPatternString, StrictStr]
NumberValue = Union[TokenPatternNumber, StrictInt, StrictFloat]
UnderscoreValue = Union[
TokenPatternString, TokenPatternNumber, str, int, float, list, bool
]
IobValue = Literal["", "I", "O", "B", 0, 1, 2, 3]
| TokenPatternOperatorMinMax |
python | Textualize__textual | src/textual/_binary_encode.py | {
"start": 340,
"end": 7653
} | class ____(Exception):
"""A problem decoding data."""
def dump(data: object) -> bytes:
"""Encodes a data structure into bytes.
Args:
data: Data structure
Returns:
A byte string encoding the data.
"""
def encode_none(_datum: None) -> bytes:
"""
Encodes a None value.
Args:
datum: Always None.
Returns:
None encoded.
"""
return b"N"
def encode_bool(datum: bool) -> bytes:
"""
Encode a boolean value.
Args:
datum: The boolean value to encode.
Returns:
The encoded bytes.
"""
return b"T" if datum else b"F"
def encode_int(datum: int) -> bytes:
"""
Encode an integer value.
Args:
datum: The integer value to encode.
Returns:
The encoded bytes.
"""
return b"i%ie" % datum
def encode_bytes(datum: bytes) -> bytes:
"""
Encode a bytes value.
Args:
datum: The bytes value to encode.
Returns:
The encoded bytes.
"""
return b"%i:%s" % (len(datum), datum)
def encode_string(datum: str) -> bytes:
"""
Encode a string value.
Args:
datum: The string value to encode.
Returns:
The encoded bytes.
"""
encoded_data = datum.encode("utf-8")
return b"s%i:%s" % (len(encoded_data), encoded_data)
def encode_list(datum: list) -> bytes:
"""
Encode a list value.
Args:
datum: The list value to encode.
Returns:
The encoded bytes.
"""
return b"l%se" % b"".join(encode(element) for element in datum)
def encode_tuple(datum: tuple) -> bytes:
"""
Encode a tuple value.
Args:
datum: The tuple value to encode.
Returns:
The encoded bytes.
"""
return b"t%se" % b"".join(encode(element) for element in datum)
def encode_dict(datum: dict) -> bytes:
"""
Encode a dictionary value.
Args:
datum: The dictionary value to encode.
Returns:
The encoded bytes.
"""
return b"d%se" % b"".join(
b"%s%s" % (encode(key), encode(value)) for key, value in datum.items()
)
ENCODERS: dict[type, Callable[[Any], Any]] = {
type(None): encode_none,
bool: encode_bool,
int: encode_int,
bytes: encode_bytes,
str: encode_string,
list: encode_list,
tuple: encode_tuple,
dict: encode_dict,
}
def encode(datum: object) -> bytes:
"""Recursively encode data.
Args:
datum: Data suitable for encoding.
Raises:
TypeError: If `datum` is not one of the supported types.
Returns:
Encoded data bytes.
"""
try:
decoder = ENCODERS[type(datum)]
except KeyError:
            raise TypeError(f"Can't encode {datum!r}") from None
return decoder(datum)
return encode(data)
def load(encoded: bytes) -> object:
"""Load an encoded data structure from bytes.
Args:
encoded: Encoded data in bytes.
Raises:
DecodeError: If an error was encountered decoding the string.
Returns:
Decoded data.
"""
if not isinstance(encoded, bytes):
raise TypeError("must be bytes")
max_position = len(encoded)
position = 0
def get_byte() -> bytes:
"""Get an encoded byte and advance position.
Raises:
DecodeError: If the end of the data was reached
Returns:
A bytes object with a single byte.
"""
nonlocal position
if position >= max_position:
raise DecodeError("More data expected")
character = encoded[position : position + 1]
position += 1
return character
def peek_byte() -> bytes:
"""Get the byte at the current position, but don't advance position.
Returns:
A bytes object with a single byte.
"""
return encoded[position : position + 1]
def get_bytes(size: int) -> bytes:
"""Get a number of bytes of encode data.
Args:
size: Number of bytes to retrieve.
Raises:
DecodeError: If there aren't enough bytes.
Returns:
A bytes object.
"""
nonlocal position
bytes_data = encoded[position : position + size]
if len(bytes_data) != size:
            raise DecodeError(f"Missing bytes in {bytes_data!r}")
position += size
return bytes_data
def decode_int() -> int:
"""Decode an int from the encoded data.
Returns:
An integer.
"""
int_bytes = b""
while (byte := get_byte()) != b"e":
int_bytes += byte
return int(int_bytes)
def decode_bytes(size_bytes: bytes) -> bytes:
"""Decode a bytes string from the encoded data.
Returns:
A bytes object.
"""
while (byte := get_byte()) != b":":
size_bytes += byte
bytes_string = get_bytes(int(size_bytes))
return bytes_string
def decode_string() -> str:
"""Decode a (utf-8 encoded) string from the encoded data.
Returns:
A string.
"""
size_bytes = b""
while (byte := get_byte()) != b":":
size_bytes += byte
bytes_string = get_bytes(int(size_bytes))
decoded_string = bytes_string.decode("utf-8", errors="replace")
return decoded_string
def decode_list() -> list[object]:
"""Decode a list.
Returns:
A list of data.
"""
elements: list[object] = []
add_element = elements.append
while peek_byte() != b"e":
add_element(decode())
get_byte()
return elements
def decode_tuple() -> tuple[object, ...]:
"""Decode a tuple.
Returns:
A tuple of decoded data.
"""
elements: list[object] = []
add_element = elements.append
while peek_byte() != b"e":
add_element(decode())
get_byte()
return tuple(elements)
def decode_dict() -> dict[object, object]:
"""Decode a dict.
Returns:
A dict of decoded data.
"""
elements: dict[object, object] = {}
add_element = elements.__setitem__
while peek_byte() != b"e":
add_element(decode(), decode())
get_byte()
return elements
DECODERS = {
b"i": decode_int,
b"s": decode_string,
b"l": decode_list,
b"t": decode_tuple,
b"d": decode_dict,
b"T": lambda: True,
b"F": lambda: False,
b"N": lambda: None,
}
def decode() -> object:
"""Recursively decode data.
Returns:
Decoded data.
"""
decoder = DECODERS.get(initial := get_byte(), None)
if decoder is None:
return decode_bytes(initial)
return decoder()
return decode()
| DecodeError |
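The decoder above fully determines the wire format, so a few hand-built payloads make it concrete. This is a minimal sketch that exercises only the load() function shown; the byte strings are written by hand to match the DECODERS table, not produced by the encoder:

# Prefixes per DECODERS: i=int, s=str, l=list, t=tuple, d=dict, T/F/N=True/False/None;
# a bare length prefix (no letter) decodes as raw bytes.
assert load(b"i42e") == 42
assert load(b"s5:hello") == "hello"
assert load(b"5:hello") == b"hello"
assert load(b"li1ei2ei3ee") == [1, 2, 3]
assert load(b"ds3:keyi42ee") == {"key": 42}
assert load(b"T") is True and load(b"N") is None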
python | scikit-learn__scikit-learn | sklearn/naive_bayes.py | {
"start": 33974,
"end": 39426
} | class ____(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=True
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. versionchanged:: 1.4
The default value of `force_alpha` changed to `True`.
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB()
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"norm": ["boolean"],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha=True,
fit_prior=True,
class_prior=None,
norm=False,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.norm = norm
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.positive_only = True
return tags
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
| ComplementNB |
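The core of the algorithm is the complement-weight computation in _update_feature_log_prob. A standalone NumPy sketch with made-up counts (two classes, two features, alpha=1) shows the arithmetic:

import numpy as np

feature_count = np.array([[2.0, 1.0],   # per-class, per-feature counts
                          [0.0, 3.0]])
feature_all = feature_count.sum(axis=0)             # [2., 4.]
alpha = 1.0
comp_count = feature_all + alpha - feature_count     # smoothed counts of the complement classes
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
weights = -logged                                    # norm=False branch
print(weights.round(3))  # [[1.609 0.223]
                         #  [0.511 0.916]]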
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 89689,
"end": 90107
} | class ____(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
| AppStack |
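A tiny usage sketch, assuming the surrounding bottle module where the Bottle class is defined:

apps = AppStack()
first = apps.push()        # no value given, so a fresh Bottle() is created and pushed
assert apps() is first     # calling the stack returns the current default app
apps.push(first)           # an existing Bottle instance is pushed unchanged
assert apps() is first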
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 58257,
"end": 58878
} | class ____(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
| HeaderProperty |
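The descriptor only assumes its host object exposes get_header(), __setitem__ and __delitem__. A minimal sketch with a hypothetical FakeResponse host (not part of the original module):

class FakeResponse:
    content_type = HeaderProperty('Content-Type', default='text/html')
    def __init__(self):
        self._headers = {}
    def get_header(self, name, default):
        return self._headers.get(name, default)
    def __setitem__(self, name, value):
        self._headers[name] = value
    def __delitem__(self, name):
        del self._headers[name]

resp = FakeResponse()
assert resp.content_type == 'text/html'    # falls back to the default
resp.content_type = 'application/json'     # routed through __set__
assert resp.content_type == 'application/json'
del resp.content_type                      # routed through __delete__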
python | kubernetes-client__python | kubernetes/client/models/v1beta1_apply_configuration.py | {
"start": 383,
"end": 8012
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str'
}
attribute_map = {
'expression': 'expression'
}
def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ApplyConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self.discriminator = None
if expression is not None:
self.expression = expression
@property
def expression(self):
"""Gets the expression of this V1beta1ApplyConfiguration. # noqa: E501
expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec Apply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field: Object{ spec: Object.spec{ serviceAccountName: \"example\" } } Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration. CEL expressions have access to the object types needed to create apply configurations: - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:return: The expression of this V1beta1ApplyConfiguration. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1beta1ApplyConfiguration.
expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec Apply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field: Object{ spec: Object.spec{ serviceAccountName: \"example\" } } Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration. CEL expressions have access to the object types needed to create apply configurations: - 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers') CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value. For example, a variable named 'foo' can be accessed as 'variables.foo'. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required. # noqa: E501
:param expression: The expression of this V1beta1ApplyConfiguration. # noqa: E501
:type: str
"""
self._expression = expression
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ApplyConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ApplyConfiguration):
return True
return self.to_dict() != other.to_dict()
| V1beta1ApplyConfiguration |
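A minimal usage sketch (assumes the kubernetes Python client is importable, since the constructor falls back to its Configuration object when none is passed); the expression is the CEL snippet from the docstring above:

cfg = V1beta1ApplyConfiguration(
    expression='Object{ spec: Object.spec{ serviceAccountName: "example" } }'
)
assert cfg.to_dict() == {
    'expression': 'Object{ spec: Object.spec{ serviceAccountName: "example" } }'
}
print(cfg)  # pprint-formatted dict via __repr__/to_str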
python | pytorch__pytorch | torch/serialization.py | {
"start": 26368,
"end": 27501
} | class ____(_opener[torch._C.PyTorchFileWriter]):
def __init__(self, name: str) -> None:
self.file_stream = None
self.name = name
try:
self.name.encode("ascii")
except UnicodeEncodeError:
# PyTorchFileWriter only supports ascii filename.
# For filenames with non-ascii characters, we rely on Python
# for writing out the file.
# pyrefly: ignore [bad-assignment]
self.file_stream = io.FileIO(self.name, mode="w")
super().__init__(
torch._C.PyTorchFileWriter( # pyrefly: ignore # no-matching-overload
self.file_stream, get_crc32_options(), _get_storage_alignment()
)
)
else:
super().__init__(
torch._C.PyTorchFileWriter(
self.name, get_crc32_options(), _get_storage_alignment()
)
)
def __exit__(self, *args) -> None:
self.file_like.write_end_of_file()
if self.file_stream is not None:
self.file_stream.close()
| _open_zipfile_writer_file |
python | neetcode-gh__leetcode | python/0011-container-with-most-water.py | {
"start": 0,
"end": 358
} | class ____:
def maxArea(self, height: List[int]) -> int:
l, r = 0, len(height) - 1
res = 0
while l < r:
res = max(res, min(height[l], height[r]) * (r - l))
if height[l] < height[r]:
l += 1
elif height[r] <= height[l]:
r -= 1
return res
| Solution |
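A quick check of the two-pointer solution (assumes "from typing import List" is in scope for the annotation). On the classic example the best container spans heights 8 and 7 with width 7, giving min(8, 7) * 7 == 49:

assert Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49
assert Solution().maxArea([1, 1]) == 1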
python | Netflix__metaflow | metaflow/plugins/secrets/inline_secrets_provider.py | {
"start": 55,
"end": 294
} | class ____(SecretsProvider):
TYPE = "inline"
def get_secret_as_dict(self, secret_id, options={}, role=None):
"""Intended to be used for testing purposes only."""
return options.get("env_vars", {})
| InlineSecretsProvider |
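The behaviour is fully visible in the source: whatever sits under the env_vars option comes back as the secret dict. A minimal sketch (assumes a Metaflow install so SecretsProvider resolves):

provider = InlineSecretsProvider()
assert provider.get_secret_as_dict(
    "any-id", options={"env_vars": {"API_TOKEN": "dummy"}}
) == {"API_TOKEN": "dummy"}
assert provider.get_secret_as_dict("any-id") == {}   # no env_vars option -> empty dict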
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 81985,
"end": 87759
} | class ____(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
mems: Optional[torch.Tensor] = None,
perm_mask: Optional[torch.Tensor] = None,
target_mapping: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
input_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
use_mems: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs, # delete when `use_cache` is removed in XLNetModel
) -> Union[tuple, XLNetForQuestionAnsweringSimpleOutput]:
r"""
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (see `mems` output below) . Can be used to speed up sequential
decoding. The token ids which have their past given to this model should not be passed as `input_ids` as
they have already been computed.
`use_mems` has to be set to `True` to make use of `mems`.
perm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):
Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:
- if `perm_mask[k, i, j] = 0`, i attend to j in batch k;
- if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.
If not set, each token attends to all the others (full bidirectional attention). Only used during
pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):
Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is
on the j-th token. Only used during pretraining for partial prediction or for sequential decoding
(generation).
input_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):
Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for
real tokens and 1 for padding which is kept for compatibility with the original code base.
Mask values selected in `[0, 1]`:
- 1 for tokens that are **masked**,
- 0 for tokens that are **not masked**.
You can only uses one of `input_mask` and `attention_mask`.
use_mems (`bool`, *optional*):
Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden
states from previous forward passes to compute attention, which can significantly improve performance for
sequential decoding tasks.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
inputs_embeds=inputs_embeds,
use_mems=use_mems,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return XLNetForQuestionAnsweringSimpleOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
mems=outputs.mems,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| XLNetForQuestionAnsweringSimple |
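A smoke-test sketch with a deliberately tiny, made-up config (the hyperparameter values and shapes below are illustrative only, not defaults):

import torch
from transformers import XLNetConfig, XLNetForQuestionAnsweringSimple

config = XLNetConfig(vocab_size=100, d_model=32, n_layer=2, n_head=2, d_inner=64)
model = XLNetForQuestionAnsweringSimple(config)
input_ids = torch.randint(0, 100, (1, 8))
out = model(
    input_ids,
    start_positions=torch.tensor([1]),
    end_positions=torch.tensor([3]),
)
print(out.loss)                 # averaged start/end cross-entropy
print(out.start_logits.shape)   # torch.Size([1, 8])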
python | pytorch__pytorch | test/test_ops.py | {
"start": 112445,
"end": 122703
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Turn on FakeTensor caching and cross-checking for these tests:
cache_enabled = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_enabled", True
)
cache_enabled.start()
self.addCleanup(cache_enabled.stop)
cache_crosscheck = unittest.mock.patch(
"torch._dynamo.config.fake_tensor_cache_crosscheck_enabled", True
)
cache_crosscheck.start()
self.addCleanup(cache_crosscheck.stop)
def _test_fake_helper(self, device, dtype, op, context):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
mode = FakeTensorMode()
from torch.fx.experimental.symbolic_shapes import ShapeEnv
allow_dynamic_output_shape_shape_env = ShapeEnv(
allow_dynamic_output_shape_ops=True
)
allow_dynamic_output_shape_mode = FakeTensorMode(
shape_env=allow_dynamic_output_shape_shape_env
)
try:
with context():
res = op(sample.input, *sample.args, **sample.kwargs)
except Exception:
continue
def run_with_fake_mode_and_verify(fake_mode, match_results=True):
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return fake_mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
with context():
with fake_mode:
res_fake = op(input, *args, **kwargs)
if not match_results:
return
for fake_out, real_out in zip(
pytree.tree_leaves(res_fake), pytree.tree_leaves(res)
):
if not isinstance(fake_out, torch.Tensor):
self.assertTrue(not isinstance(real_out, torch.Tensor))
self.assertEqual(fake_out, real_out)
continue
self.assertTrue(isinstance(fake_out, FakeTensor))
# if you see a shape exception here, you may need to add
# a `dynamic_output_shape` tag to an operator
if op.op not in [
torch.ops.aten._efficient_attention_forward,
torch.ops.aten._flash_attention_forward,
]:
# prims/decomps must correctly model strides,
# see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325
# note: the excluded ops have intentionally incorrect device;
# see "Note [Seed and Offset]" (_meta_registrations.py)
prims.utils.compare_tensor_meta(fake_out, real_out, True)
if name not in aliasing_failures:
fake_aliasing = outputs_alias_inputs(
(input, args, kwargs), res_fake
)
real_aliasing = outputs_alias_inputs(
(sample.input, sample, args, sample.kwargs), res
)
self.assertEqual(fake_aliasing, real_aliasing)
self.assertTrue(
name not in dynamic_output_op_tests
and name not in data_dependent_op_tests
)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
pass
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
except torch._subclasses.fake_tensor.DynamicOutputShapeException:
self.assertTrue(
name in dynamic_output_op_tests
or name in sometimes_dynamic_output_op_test
)
self.assertTrue(
fake_mode.shape_env is None
or not fake_mode.shape_env.allow_dynamic_output_shape_ops
or name not in supported_dynamic_output_op_tests
)
except torch._subclasses.fake_tensor.DataDependentOutputException:
self.assertTrue(name in data_dependent_op_tests)
run_with_fake_mode_and_verify(mode)
if name in supported_dynamic_output_op_tests:
run_with_fake_mode_and_verify(
allow_dynamic_output_shape_mode, match_results=False
)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_pointwise_ops(self, device, dtype, op):
name = op.name
if op.variant_test_name:
name += "." + op.variant_test_name
if name in fake_skips or "sparse" in name or "jiterator" in name:
self.skipTest("Skip failing test")
test_self = self
class TestPointwiseMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
out = func(*args, **kwargs)
if torch.Tag.pointwise in func.tags:
shapes = []
for inp in pytree.arg_tree_leaves(*args, **kwargs):
if isinstance(inp, torch.Tensor):
shapes.append(inp.shape)
out_shape = torch._refs._broadcast_shapes(*shapes)
for out_elem in pytree.tree_leaves(out):
if isinstance(out_elem, torch.Tensor):
test_self.assertEqual(out_elem.shape, out_shape)
return out
samples = op.sample_inputs(device, dtype, requires_grad=False)
for sample in samples:
mode = FakeTensorMode()
def map_to_fake(e):
if isinstance(e, torch.Tensor):
return mode.from_tensor(e)
else:
return e
input = tree_map(map_to_fake, sample.input)
args = tree_map(map_to_fake, sample.args)
kwargs = tree_map(map_to_fake, sample.kwargs)
try:
op(input, *args, **kwargs)
except Exception:
continue
with TestPointwiseMode():
with mode:
op(input, *args, **kwargs)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake(self, device, dtype, op):
self._test_fake_helper(device, dtype, op, contextlib.nullcontext)
@ops(op_db, dtypes=OpDTypes.any_one)
def test_fake_autocast(self, device, dtype, op):
device_type = torch.device(device).type
if op.name in fake_autocast_device_skips[device_type]:
self.skipTest("Skip failing test")
def context_fn():
return torch.amp.autocast(device_type)
self._test_fake_helper(device, dtype, op, context_fn)
def _test_fake_crossref_helper(self, device, dtype, op, context):
samples = op.sample_inputs(device, dtype, requires_grad=True)
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
# skip these to speed up tests
common_skip_ops = (
aten.detach.default,
aten.empty_strided.default,
aten.copy_.default,
aten.is_same_size.default,
)
# TODO: enable check_aliasing, batch norm fails
try:
with torch._subclasses.CrossRefFakeMode(
ignore_op_fn=lambda fn: fn in common_skip_ops, check_aliasing=True
):
with (
warnings.catch_warnings(),
context(),
torch.autograd.set_multithreading_enabled(False),
):
composite_compliance.compute_expected_grads(
op.get_op(),
args,
kwargs,
sample.output_process_fn_grad,
op.gradcheck_wrapper,
)
except torch._subclasses.fake_tensor.UnsupportedOperatorException:
pass
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps(
"TestFakeTensor", "test_fake_crossref_backward_no_amp", fake_backward_xfails
)
def test_fake_crossref_backward_no_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, contextlib.nullcontext)
@onlyCUDA
@ops([op for op in op_db if op.supports_autograd], allowed_dtypes=(torch.float,))
@skipOps(
"TestFakeTensor",
"test_fake_crossref_backward_amp",
fake_backward_xfails | fake_autocast_backward_xfails,
)
def test_fake_crossref_backward_amp(self, device, dtype, op):
self._test_fake_crossref_helper(device, dtype, op, torch.cuda.amp.autocast)
@ops([op for op in ops_and_refs if op.is_factory_function])
def test_strided_layout(self, device, dtype, op):
samples = op.sample_inputs(device, dtype)
for sample in samples:
kwargs = sample.kwargs.copy()
kwargs["layout"] = torch.strided
strided_result = op(sample.input, *sample.args, **kwargs)
self.assertEqual(strided_result.layout, torch.strided)
| TestFakeTensor |