language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/api/endpoints/project_commits.py | {
"start": 477,
"end": 1888
} | class ____(ProjectEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectReleasePermission,)
def get(self, request: Request, project) -> Response:
"""
List a Project's Commits
`````````````````````````
Retrieve a list of commits for a given project.
:pparam string organization_id_or_slug: the id or slug of the organization the
commit belongs to.
:pparam string project_id_or_slug: the id or slug of the project to list the
commits of.
:qparam string query: this parameter can be used to create a
"starts with" filter for the commit key.
"""
query = request.GET.get("query")
queryset = Commit.objects.filter(
organization_id=project.organization_id,
releasecommit__release__releaseproject__project_id=project.id,
)
if query:
queryset = queryset.filter(key__istartswith=query)
return self.paginate(
request=request,
queryset=queryset,
order_by=("key", "-date_added") if query else "-date_added",
on_results=lambda x: serialize(x, request.user),
paginator_cls=OffsetPaginator,
)
| ProjectCommitsEndpoint |
python | openai__openai-python | src/openai/types/beta/thread_create_params.py | {
"start": 5803,
"end": 6423
} | class ____(TypedDict, total=False):
vector_store_ids: SequenceNotStr[str]
"""
The
[vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
attached to this thread. There can be a maximum of 1 vector store attached to
the thread.
"""
vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
"""
A helper to create a
[vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
with file_ids and attach it to this thread. There can be a maximum of 1 vector
store attached to the thread.
"""
| ToolResourcesFileSearch |
python | huggingface__transformers | src/transformers/models/apertus/modular_apertus.py | {
"start": 9285,
"end": 11500
} | class ____(LlamaAttention):
def __init__(self, config: ApertusConfig, layer_idx: Optional[int] = None):
super().__init__(config, layer_idx)
self.q_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
self.k_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| ApertusAttention |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 7404,
"end": 10711
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.MeanAbsolutePercentageError(name="mymape")
)
def test_all_correct_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 487.259, 3)
def test_sample_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 422.8888, 3)
def test_timestep_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mape_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 694.4444)
def test_zero_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0, 3)
def test_no_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [621.8518, 352.6666])
def test_mean_with_sample_weight_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction="mean_with_sample_weight"
)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 183.865)
def test_dtype_arg(self):
mape_obj = losses.MeanAbsolutePercentageError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mape_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
| MeanAbsolutePercentageErrorTest |
python | keras-team__keras | keras/src/backend/common/backend_utils_test.py | {
"start": 2702,
"end": 3682
} | class ____(test_case.TestCase):
def test_valid_padding_without_output_padding(self):
"""Test computation with 'valid' padding and no output padding"""
jax_padding = compute_conv_transpose_padding_args_for_jax(
input_shape=(1, 5, 5, 3),
kernel_shape=(3, 3, 3, 3),
strides=2,
padding="valid",
output_padding=None,
dilation_rate=1,
)
self.assertEqual(jax_padding, [(2, 2), (2, 2)])
def test_same_padding_without_output_padding(self):
"""Test computation with 'same' padding and no output padding"""
jax_padding = compute_conv_transpose_padding_args_for_jax(
input_shape=(1, 5, 5, 3),
kernel_shape=(3, 3, 3, 3),
strides=2,
padding="same",
output_padding=None,
dilation_rate=1,
)
self.assertEqual(jax_padding, [(2, 1), (2, 1)])
| ComputeConvTransposePaddingArgsForJAXTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 2276,
"end": 3161
} | class ____(IncrementalShopifyStreamWithDeletedEvents):
data_field = "orders"
deleted_events_api_name = "Order"
initial_limit = 250
def __init__(self, config: Mapping[str, Any]):
self._error_handler = LimitReducingErrorHandler(
max_retries=5,
error_mapping=DEFAULT_ERROR_MAPPING | ShopifyNonRetryableErrors("orders"),
)
super().__init__(config)
def request_params(self, stream_state=None, next_page_token=None, **kwargs):
params = super().request_params(stream_state=stream_state, next_page_token=next_page_token, **kwargs)
params["limit"] = self.initial_limit # Always start with the default limit; error handler will mutate on retry
if not next_page_token:
params["status"] = "any"
return params
def get_error_handler(self):
return self._error_handler
| Orders |
python | celery__celery | celery/concurrency/thread.py | {
"start": 521,
"end": 738
} | class ____:
def __init__(self, future: Future) -> None:
self.f = future
self.get = self.f.result
def wait(self, timeout: float | None = None) -> None:
wait([self.f], timeout)
| ApplyResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/test_engagements_calls.py | {
"start": 373,
"end": 5585
} | class ____(HubspotCRMSearchStream):
SCOPES = ["crm.objects.contacts.read"]
CURSOR_FIELD = "updatedAt"
STREAM_NAME = "engagements_calls"
OBJECT_TYPE = "calls"
ASSOCIATIONS = ["companies", "contacts", "deals", "tickets"]
OBJECT_ID = "12345"
@HttpMocker()
def test_given_records_when_read_extract_desired_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_oauth=True, with_dynamic_schemas=False, entities=OBJECTS_WITH_DYNAMIC_SCHEMA)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.oauth_config(), self.STREAM_NAME, SyncMode.incremental)
assert len(output.records) == 1
@HttpMocker()
def test_given_one_page_when_read_stream_private_token_then_return_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.records) == 1
@HttpMocker()
def test_given_error_response_when_read_analytics_then_get_trace_message(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), HttpResponse(status_code=500, body="{}"), method="post")
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.records) == 0
assert len(output.trace_messages) > 0
assert len(output.errors) > 0
@HttpMocker()
def test_given_500_then_200_when_read_then_return_records(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
# First attempt 500, then success (both POST)
self.mock_response(http_mocker, self.request(), [HttpResponse(status_code=500, body="{}"), self.response()], method="post")
# Associations will be called only after the 200 response
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.records) == 1
assert len(output.trace_messages) > 0
assert len(output.errors) == 0
@HttpMocker()
def test_given_missing_scopes_error_when_read_then_stop_sync(self, http_mocker: HttpMocker):
self.mock_oauth(http_mocker, self.ACCESS_TOKEN)
self.mock_custom_objects_streams(http_mocker)
self.read_from_stream(self.oauth_config(), self.STREAM_NAME, SyncMode.full_refresh, expecting_exception=True)
@HttpMocker()
def test_given_unauthorized_error_when_read_then_stop_sync(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), HttpResponse(status_code=http.HTTPStatus.UNAUTHORIZED, body="{}"), method="post")
with mock.patch("time.sleep"):
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.records) == 0
assert len(output.trace_messages) > 0
assert len(output.errors) > 0
@HttpMocker()
def test_given_one_page_when_read_then_get_records_with_flattened_properties(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
record = output.records[0].record.data
assert "properties" in record # legacy struct remains
prop_fields = len([f for f in record if f.startswith("properties_")])
assert prop_fields > 0
@HttpMocker()
def test_given_incremental_sync_when_read_then_state_message_produced_and_state_match_latest_record(self, http_mocker: HttpMocker):
self._set_up_requests(http_mocker, with_dynamic_schemas=False)
self.mock_response(http_mocker, self.request(), self.response(), method="post")
self._mock_all_associations_for_ids(http_mocker, parent_entity=self.OBJECT_TYPE, record_ids=[self.OBJECT_ID])
output = self.read_from_stream(self.private_token_config(self.ACCESS_TOKEN), self.STREAM_NAME, SyncMode.incremental)
assert len(output.state_messages) == 2
| TestEngagementCallsStream |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 39567,
"end": 39650
} | class ____(dtypes.extended):
pass
@dataclasses.dataclass(frozen=True)
| barrier_dtype |
python | huggingface__transformers | src/transformers/quantizers/quantizer_quanto.py | {
"start": 1057,
"end": 6147
} | class ____(HfQuantizer):
"""
Quantizer for the quanto library
"""
required_packages = ["quanto", "accelerate"]
requires_parameters_quantization = True
requires_calibration = False
def __init__(self, quantization_config: QuantoConfig, **kwargs):
super().__init__(quantization_config, **kwargs)
self.post_init()
def post_init(self):
r"""
Safety checker
"""
if self.quantization_config.activations is not None and not self.pre_quantized:
raise ValueError(
"We don't support quantizing the activations with transformers library."
"Use quanto library for more complex use cases such as activations quantization, calibration and quantization aware training."
)
def validate_environment(self, *args, **kwargs):
if not is_optimum_quanto_available():
raise ImportError(
"Loading an optimum-quanto quantized model requires optimum-quanto library (`pip install optimum-quanto`)"
)
if not is_accelerate_available():
raise ImportError(
"Loading an optimum-quanto quantized model requires accelerate library (`pip install accelerate`)"
)
def update_device_map(self, device_map):
if device_map is None:
device_map = {"": "cpu"}
logger.info(
"The device_map was not initialized. "
"Setting device_map to {'':'cpu'}. "
"If you want to use the model for inference, please set device_map ='auto'"
)
return device_map
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
logger.info("You did not specify `dtype` in `from_pretrained`. Setting it to `torch.float32`.")
dtype = torch.float32
return dtype
def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
if is_optimum_quanto_available():
from optimum.quanto import QModuleMixin
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, QModuleMixin):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
if is_optimum_quanto_available():
from optimum.quanto import QModuleMixin
module, tensor_name = get_module_from_name(model, param_name)
# We only quantize the weights and the bias is not quantized.
if isinstance(module, QModuleMixin) and "weight" in tensor_name:
# if the weights are quantized, don't need to recreate it again with `create_quantized_param`
return not module.frozen
else:
return False
def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
**kwargs,
):
from ..modeling_utils import _load_parameter_into_model
_load_parameter_into_model(model, param_name, param_value.to(target_device))
module, _ = get_module_from_name(model, param_name)
module.freeze()
module.weight.requires_grad = False
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
from accelerate.utils import CustomDtype
mapping = {
"int8": torch.int8,
"float8": CustomDtype.FP8,
"int4": CustomDtype.INT4,
"int2": CustomDtype.INT2,
}
target_dtype = mapping[self.quantization_config.weights]
return target_dtype
def _process_model_before_weight_loading(
self, model: "PreTrainedModel", keep_in_fp32_modules: list[str] | None = None, **kwargs
):
from ..integrations import replace_with_quanto_layers
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model, _ = replace_with_quanto_layers(
model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
)
model.config.quantization_config = self.quantization_config
@property
def is_trainable(self) -> bool:
return True
def is_serializable(self, safe_serialization=None):
return False
| QuantoHfQuantizer |
python | streamlit__streamlit | lib/tests/streamlit/elements/markdown_test.py | {
"start": 14068,
"end": 15669
} | class ____(DeltaGeneratorTestCase):
"""Test st.caption text_alignment parameter."""
@parameterized.expand(
[
("left", 1),
("center", 2),
("right", 3),
("justify", 4),
(None, 1), # Default case
]
)
def test_st_caption_text_alignment(
self, text_alignment: str | None, expected_alignment: int
):
"""Test st.caption with various text_alignment values.
Parameters
----------
text_alignment : str | None
The text alignment value to test, or None for default behavior.
expected_alignment : int
The expected protobuf alignment enum value.
"""
if text_alignment is None:
st.caption("Caption text")
else:
st.caption("Caption text", text_alignment=text_alignment)
el = self.get_delta_from_queue().new_element
assert el.markdown.body == "Caption text"
assert el.markdown.is_caption is True
assert el.text_alignment_config.alignment == expected_alignment
def test_st_caption_text_alignment_invalid(self):
"""Test st.caption with invalid text_alignment raises error."""
with pytest.raises(StreamlitAPIException) as exc:
st.caption("Caption text", text_alignment="top")
assert 'Invalid text_alignment value: "top"' in str(exc.value)
assert "left" in str(exc.value)
assert "center" in str(exc.value)
assert "right" in str(exc.value)
assert "justify" in str(exc.value)
| StCaptionTextAlignmentTest |
python | pytorch__pytorch | torch/ao/quantization/pt2e/representation/rewrite.py | {
"start": 19365,
"end": 28387
} | class ____:
"""Data needed for rewrite, this includes example inputs, pattern and replacement functions
and post transformation functions for the exported pattern and replacement GraphModule
"""
# example inputs used for exporting the pattern into GraphModule
example_inputs: tuple[Any, ...]
pattern: Callable
replacement: Callable
# post transformation on the exported pattern and replacement GraphModule
pattern_post_trans: Callable[[GraphModule], GraphModule] | None = None
replacement_post_trans: Callable[[GraphModule], GraphModule] | None = None
def reference_representation_rewrite(model: GraphModule) -> GraphModule:
_QUANTIZED_LINEAR_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (2, 5), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randint(-128, 127, (5, 5), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-127], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randn(1, dtype=torch.float),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS = (
torch.randn((2, 5), dtype=torch.float),
-128,
127,
torch.finfo(torch.float32).eps,
torch.randint(-128, 127, (5, 5), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-127], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randn(1, dtype=torch.float),
)
_QUANTIZED_CONV2d_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-127], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randn(1, dtype=torch.float),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = (
torch.randn(1, 3, 3, 3, dtype=torch.float),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(1, dtype=torch.float),
torch.zeros(1, dtype=torch.int),
torch.tensor([-128], dtype=torch.int),
torch.tensor([127], dtype=torch.int),
)
_QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = (
torch.randn(1, 3, 3, 3, dtype=torch.float),
torch.randn(3, dtype=torch.float),
torch.zeros(3, dtype=torch.int),
1,
-128,
127,
)
_DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS = (
torch.randint(-128, 127, (1, 3, 3, 3), dtype=torch.int8),
torch.randn(3, dtype=torch.float),
torch.zeros(3, dtype=torch.int),
1,
-128,
127,
)
_REWRITE_INFO_LIST = [
_RewriteInfo(
_DYNAMIC_QUANTIZED_LINEAR_EXAMPLE_INPUTS,
_WrapperModule(_qdq_dynamic_quantized_linear),
_WrapperModule(_reference_dynamic_quantized_linear),
partial(
_replace_literals_with_existing_placeholders,
literal_to_ph_idx={-128: 1, 127: 2, torch.finfo(torch.float32).eps: 3},
),
partial(
_replace_literals_with_existing_placeholders,
literal_to_ph_idx={-128: 1, 127: 2, torch.finfo(torch.float32).eps: 3},
),
),
_RewriteInfo(
_QUANTIZED_LINEAR_EXAMPLE_INPUTS,
_WrapperModule(_qdq_quantized_linear),
_WrapperModule(_reference_quantized_linear),
_replace_literals_with_new_placeholders,
_replace_literals_with_new_placeholders,
),
_RewriteInfo(
_QUANTIZED_CONV2d_EXAMPLE_INPUTS,
_WrapperModule(_qdq_quantized_conv2d),
_WrapperModule(_reference_quantized_conv2d),
partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]),
partial(_replace_literals_with_new_placeholders, exclude_literals=[-1]),
),
_RewriteInfo(
_QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS,
_WrapperModule(_qdq_quantized_add_relu),
_WrapperModule(_reference_quantized_add_relu),
),
_RewriteInfo(
_QUANTIZED_ADD_OR_ADD_RELU_EXAMPLE_INPUTS,
_WrapperModule(_qdq_quantized_add),
_WrapperModule(_reference_quantized_add),
),
_RewriteInfo(
_QUANTIZED_MAX_POOL2D_EXAMPLE_INPUTS,
_WrapperModule(_qdq_quantized_max_pool2d),
_WrapperModule(_reference_quantized_max_pool2d),
_replace_literals_with_new_placeholders,
_replace_literals_with_new_placeholders,
),
_RewriteInfo(
_QUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS,
_WrapperModule(_quantize_per_tensor_int8),
_WrapperModule(_reference_quantize_per_tensor_int8),
),
_RewriteInfo(
_DEQUANTIZE_PER_TENSOR_INT8_EXAMPLE_INPUTS,
_WrapperModule(_dequantize_per_tensor_int8),
_WrapperModule(_reference_dequantize_per_tensor_int8),
),
_RewriteInfo(
_QUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS,
_WrapperModule(_quantize_per_channel_int8),
_WrapperModule(_reference_quantize_per_channel_int8),
_replace_ph_qdq_per_channel_replacement,
_replace_ph_qdq_per_channel_replacement,
),
_RewriteInfo(
_DEQUANTIZE_PER_CHANNEL_INT8_EXAMPLE_INPUTS,
_WrapperModule(_dequantize_per_channel_int8),
_WrapperModule(_reference_dequantize_per_channel_int8),
_replace_ph_qdq_per_channel_replacement,
_replace_ph_qdq_per_channel_replacement,
),
]
remove_tensor_overload_for_qdq_ops(model)
with _disable_aten_to_metadata_assertions():
for rewrite_info in _REWRITE_INFO_LIST:
example_inputs = rewrite_info.example_inputs
pattern = rewrite_info.pattern
replacement = rewrite_info.replacement
pattern_post_trans = rewrite_info.pattern_post_trans
replacement_post_trans = rewrite_info.replacement_post_trans
pattern = _get_aten_graph_module_for_pattern(pattern, example_inputs) # type: ignore[arg-type, assignment]
remove_tensor_overload_for_qdq_ops(pattern) # type: ignore[arg-type]
replacement = _get_aten_graph_module_for_pattern( # type: ignore[assignment]
replacement,
example_inputs, # type: ignore[arg-type]
)
remove_tensor_overload_for_qdq_ops(replacement) # type: ignore[arg-type]
if pattern_post_trans:
pattern = pattern_post_trans(pattern)
if replacement_post_trans:
replacement = replacement_post_trans(replacement)
pattern.recompile() # type: ignore[attr-defined]
replacement.recompile() # type: ignore[attr-defined]
replace_pattern(model, pattern, replacement)
return model
| _RewriteInfo |
python | Textualize__textual | src/textual/demo/game.py | {
"start": 9725,
"end": 17007
} | class ____(containers.Vertical, can_focus=True):
"""Widget for the game board."""
ALLOW_MAXIMIZE = False
DEFAULT_CSS = """
Game {
visibility: hidden;
align: center middle;
hatch: right $panel;
border: heavy transparent;
&:focus {
border: heavy $success;
}
#grid {
border: heavy $primary;
hatch: right $panel;
box-sizing: content-box;
}
Digits {
width: auto;
color: $foreground;
}
}
"""
BINDINGS = [
Binding("up", "move('up')", "up", priority=True),
Binding("down", "move('down')", "down", priority=True),
Binding("left", "move('left')", "left", priority=True),
Binding("right", "move('right')", "right", priority=True),
]
state = reactive("waiting")
play_start_time: reactive[float] = reactive(monotonic)
play_time = reactive(0.0, init=False)
code = reactive("")
dimensions = reactive(Size(3, 3))
code = reactive("")
language = reactive("")
def __init__(
self,
code: str,
language: str,
dimensions: tuple[int, int],
tile_size: tuple[int, int],
) -> None:
self.set_reactive(Game.code, code)
self.set_reactive(Game.language, language)
self.locations: defaultdict[Offset, int | None] = defaultdict(None)
super().__init__()
self.dimensions = Size(*dimensions)
self.tile_size = Size(*tile_size)
self.play_timer: Timer | None = None
def check_win(self) -> bool:
return all(tile.start_position == tile.position for tile in self.query(Tile))
def watch_dimensions(self, dimensions: Size) -> None:
self.locations.clear()
tile_width, tile_height = dimensions
for last, tile_no in loop_last(range(0, tile_width * tile_height)):
position = Offset(*divmod(tile_no, tile_width))
self.locations[position] = None if last else tile_no
def compose(self) -> ComposeResult:
syntax = Syntax(
self.code,
self.language.lower(),
indent_guides=True,
line_numbers=True,
theme="material",
)
tile_width, tile_height = self.dimensions
self.state = "waiting"
yield Digits("")
with containers.HorizontalGroup(id="grid") as grid:
grid.styles.width = tile_width * self.tile_size[0]
grid.styles.height = tile_height * self.tile_size[1]
for row, column in product(range(tile_width), range(tile_height)):
position = Offset(row, column)
tile_no = self.locations[position]
yield Tile(syntax, tile_no, self.tile_size, position)
if self.language:
self.call_after_refresh(self.shuffle)
def update_clock(self) -> None:
if self.state == "playing":
elapsed = monotonic() - self.play_start_time
self.play_time = elapsed
def watch_play_time(self, play_time: float) -> None:
minutes, seconds = divmod(play_time, 60)
hours, minutes = divmod(minutes, 60)
self.query_one(Digits).update(f"{hours:02,.0f}:{minutes:02.0f}:{seconds:04.1f}")
def watch_state(self, old_state: str, new_state: str) -> None:
if self.play_timer is not None:
self.play_timer.stop()
if new_state == "playing":
self.play_start_time = monotonic()
self.play_timer = self.set_interval(1 / 10, self.update_clock)
def get_tile(self, tile: int | None) -> Tile:
"""Get a tile (int) or the blank (None)."""
return self.query_one("#blank" if tile is None else f"#tile{tile}", Tile)
def get_tile_at(self, position: Offset) -> Tile:
"""Get a tile at the given position, or raise an IndexError."""
if position not in self.locations:
raise IndexError("No tile")
return self.get_tile(self.locations[position])
def move_tile(self, tile_no: int | None) -> None:
"""Move a tile to the blank.
Note: this doesn't do any validation of legal moves.
"""
tile = self.get_tile(tile_no)
blank = self.get_tile(None)
blank_position = blank.position
self.locations[tile.position] = None
blank.position = tile.position
self.locations[blank_position] = tile_no
tile.position = blank_position
if self.state == "playing" and self.check_win():
self.state = "won"
self.notify("You won!", title="Sliding Tile Puzzle")
def can_move(self, tile: int) -> bool:
"""Check if a tile may move."""
blank_position = self.get_tile(None).position
tile_position = self.get_tile(tile).position
return blank_position in (
tile_position + (1, 0),
tile_position - (1, 0),
tile_position + (0, 1),
tile_position - (0, 1),
)
def action_move(self, direction: str) -> None:
if self.state != "playing":
self.app.bell()
return
blank = self.get_tile(None).position
if direction == "up":
position = blank + (0, +1)
elif direction == "down":
position = blank + (0, -1)
elif direction == "left":
position = blank + (+1, 0)
elif direction == "right":
position = blank + (-1, 0)
try:
tile = self.get_tile_at(position)
except IndexError:
return
self.move_tile(tile.tile)
def get_legal_moves(self) -> set[Offset]:
"""Get the positions of all tiles that can move."""
blank = self.get_tile(None).position
moves: list[Offset] = []
DIRECTIONS = [(-1, 0), (+1, -0), (0, -1), (0, +1)]
moves = [
blank + direction
for direction in DIRECTIONS
if (blank + direction) in self.locations
]
return {self.get_tile_at(position).position for position in moves}
@work(exclusive=True)
async def shuffle(self, shuffles: int = 150) -> None:
"""A worker to do the shuffling."""
self.visible = True
if self.play_timer is not None:
self.play_timer.stop()
self.query_one("#grid").border_title = "[reverse bold] SHUFFLING - Please Wait "
self.state = "shuffling"
previous_move: Offset = Offset(-1, -1)
for _ in range(shuffles):
legal_moves = self.get_legal_moves()
legal_moves.discard(previous_move)
previous_move = self.get_tile(None).position
move_position = choice(list(legal_moves))
move_tile = self.get_tile_at(move_position)
self.move_tile(move_tile.tile)
await sleep(0.05)
self.query_one("#grid").border_title = ""
self.state = "playing"
@on(events.Click, ".tile")
def on_tile_clicked(self, event: events.Click) -> None:
assert event.widget is not None
tile = int(event.widget.name or 0)
if self.state != "playing" or not self.can_move(tile):
self.app.bell()
return
self.move_tile(tile)
| Game |
python | pytorch__pytorch | test/test_throughput_benchmark.py | {
"start": 180,
"end": 654
} | class ____(torch.jit.ScriptModule):
def __init__(self, D_in, H, D_out):
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(2 * H, D_out)
@torch.jit.script_method
def forward(self, x1, x2):
h1_relu = self.linear1(x1).clamp(min=0)
h2_relu = self.linear1(x2).clamp(min=0)
cat = torch.cat((h1_relu, h2_relu), 1)
y_pred = self.linear2(cat)
return y_pred
| TwoLayerNet |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 18317,
"end": 18523
} | class ____(GenericType):
"""Concatenate params and ParamSpec."""
@property
def args(self):
return self.parameters[:-1]
@property
def paramspec(self):
return self.parameters[-1]
| Concatenate |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/sensor.py | {
"start": 868,
"end": 984
} | class ____(BaseModel):
"""GET /api/sensors response."""
items: list[DgApiSensor]
total: int
| DgApiSensorList |
python | dask__distributed | distributed/core.py | {
"start": 1682,
"end": 2380
} | class ____(Enum):
"""
This Enum contains the various states a cluster, worker, scheduler and nanny can be
in. Some of the status can only be observed in one of cluster, nanny, scheduler or
worker but we put them in the same Enum as they are compared with each
other.
"""
undefined = "undefined"
created = "created"
init = "init"
starting = "starting"
running = "running"
paused = "paused"
stopping = "stopping"
stopped = "stopped"
closing = "closing"
closing_gracefully = "closing_gracefully"
closed = "closed"
failed = "failed"
dont_reply = "dont_reply"
Status.lookup = {s.name: s for s in Status} # type: ignore
| Status |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/W29.py | {
"start": 43,
"end": 392
} | class ____(object):
bang = 12
#: W291:2:35
'''multiline
string with trailing whitespace'''
#: W291 W292 noeol
x = 1
#: W191 W292 noeol
if False:
pass # indented with tabs
#: W292:1:36 noeol
# This line doesn't have a linefeed
#: W292:1:5 E225:1:2 noeol
1+ 1
#: W292:1:27 E261:1:12 noeol
import this # no line feed
#: W292:3:22 noeol
| Foo |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_group_search_view_details.py | {
"start": 5514,
"end": 11164
} | class ____(BaseGSVTestCase):
endpoint = "sentry-api-0-organization-group-search-view-details"
method = "delete"
def setUp(self) -> None:
self.base_data = self.create_base_data()
# For most tests, we'll be deleting views from user_2 (no special permissions)
self.login_as(user=self.user_2)
self.user_1_view_id = str(self.base_data["user_one_views"][0].id)
self.user_2_view_id = str(self.base_data["user_two_views"][0].id)
self.user_1_view_url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"view_id": self.user_1_view_id,
},
)
self.user_2_view_url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"view_id": self.user_2_view_id,
},
)
def test_delete_view_success(self) -> None:
response = self.client.delete(self.user_2_view_url)
assert response.status_code == 204
# Verify the view was deleted
assert not GroupSearchView.objects.filter(id=self.user_2_view_id).exists()
# Verify other views still exist
remaining_views = GroupSearchView.objects.filter(
organization=self.organization, user_id=self.user_2.id
)
assert remaining_views.count() == 1
def test_delete_nonexistent_view(self) -> None:
"""Test that attempting to delete a nonexistent view returns 404."""
nonexistent_id = "99999"
url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={"organization_id_or_slug": self.organization.slug, "view_id": nonexistent_id},
)
response = self.client.delete(url)
assert response.status_code == 404
def test_delete_view_from_another_user(self) -> None:
view_id = str(self.base_data["user_one_views"][0].id)
url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={"organization_id_or_slug": self.organization.slug, "view_id": view_id},
)
response = self.client.delete(url)
assert response.status_code == 403
# Verify the view still exists (this will error out if not)
GroupSearchView.objects.get(id=view_id)
def test_superuser_can_delete_view_from_another_user(self) -> None:
# User 1 is a superuser
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"view_id": self.user_2_view_id,
},
)
response = self.client.delete(url)
assert response.status_code == 204
assert not GroupSearchView.objects.filter(id=self.user_2_view_id).exists()
def test_org_write_can_delete_view_from_another_user(self) -> None:
self.admin_user = self.create_user()
self.create_member(
user=self.admin_user,
organization=self.organization,
role="manager",
)
self.login_as(user=self.admin_user)
response = self.client.delete(self.user_1_view_url)
assert response.status_code == 204
assert not GroupSearchView.objects.filter(id=self.user_1_view_id).exists()
def test_delete_first_starred_view_decrements_succeeding_positions(self) -> None:
# Delete the first view
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"view_id": self.user_1_view_id,
},
)
response = self.client.delete(url)
assert response.status_code == 204
assert (
GroupSearchViewStarred.objects.filter(
organization_id=self.organization.id, user_id=self.user.id
).count()
== 2
)
# All succeeeding views should have their position decremented
for idx, gsv in enumerate(
GroupSearchViewStarred.objects.filter(
organization_id=self.organization.id, user_id=self.user.id
)
):
assert self.base_data["user_one_views"][idx + 1].id == gsv.group_search_view.id
assert gsv.position == idx
def test_delete_last_starred_view_does_not_decrement_positions(self) -> None:
# Delete the last view
self.login_as(user=self.user)
response = self.client.delete(
reverse(
"sentry-api-0-organization-group-search-view-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"view_id": self.base_data["user_one_views"][-1].id,
},
)
)
assert response.status_code == 204
assert (
GroupSearchViewStarred.objects.filter(
organization_id=self.organization.id, user_id=self.user.id
).count()
== 2
)
for idx, gsv in enumerate(
GroupSearchViewStarred.objects.filter(
organization_id=self.organization.id, user_id=self.user.id
)
):
assert self.base_data["user_one_views"][idx].id == gsv.group_search_view.id
| OrganizationGroupSearchViewsDeleteTest |
python | great-expectations__great_expectations | great_expectations/checkpoint/actions.py | {
"start": 17954,
"end": 20481
} | class ____(ValidationAction):
"""Sends a PagerDuty event.
```yaml
- name: send_pagerduty_alert_on_validation_result
action:
class_name: PagerdutyAlertAction
api_key: ${pagerduty_api_key}
routing_key: ${pagerduty_routing_key}
notify_on: failure
severity: critical
```
Args:
api_key: Events API v2 key for pagerduty.
routing_key: The 32 character Integration Key for an integration on a service or on a global ruleset.
notify_on: Specifies validation status that triggers notification. One of "all", "failure", "success".
severity: The PagerDuty severity levels determine the level of urgency. One of "critical", "error", "warning", or "info".
""" # noqa: E501 # FIXME CoP
type: Literal["pagerduty"] = "pagerduty"
api_key: str
routing_key: str
notify_on: NotifyOn = "failure"
severity: Literal["critical", "error", "warning", "info"] = "critical"
@override
def run(
self, checkpoint_result: CheckpointResult, action_context: ActionContext | None = None
) -> dict:
success = checkpoint_result.success or False
checkpoint_name = checkpoint_result.checkpoint_config.name
summary = f"Great Expectations Checkpoint {checkpoint_name} has "
if success:
summary += "succeeded"
else:
summary += "failed"
max_severity = self._get_max_severity_failure_from_checkpoint_result(checkpoint_result)
return self._run_pypd_alert(
dedup_key=checkpoint_name, message=summary, success=success, max_severity=max_severity
)
def _run_pypd_alert(
self,
dedup_key: str,
message: str,
success: bool,
max_severity: Optional[FailureSeverity] = None,
):
if should_notify(success=success, notify_on=self.notify_on, max_severity=max_severity):
pypd.api_key = self.api_key
pypd.EventV2.create(
data={
"routing_key": self.routing_key,
"dedup_key": dedup_key,
"event_action": "trigger",
"payload": {
"summary": message,
"severity": self.severity,
"source": "Great Expectations",
},
}
)
return {"pagerduty_alert_result": "success"}
return {"pagerduty_alert_result": "none sent"}
@public_api
| PagerdutyAlertAction |
python | spyder-ide__spyder | spyder/api/asyncdispatcher.py | {
"start": 17655,
"end": 17837
} | class ____(QObject):
"""Executor to run callbacks in the main Qt loop."""
def customEvent(self, e: _QCallbackEvent): # noqa: N802, PLR6301
e.func()
| _QCallbackExecutor |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 90519,
"end": 90838
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)
self.bn = nn.BatchNorm2d(2).to(dtype=torch.float)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
| SubModelForFusion |
python | getsentry__sentry | src/sentry/notifications/platform/target.py | {
"start": 4305,
"end": 5598
} | class ____:
"""
A wrapper class that handles serialization/deserialization of NotificationTargets.
"""
target: NotificationTarget
@property
def notification_type(self) -> NotificationTargetType:
if isinstance(self.target, IntegrationNotificationTarget):
return NotificationTargetType.INTEGRATION
elif isinstance(self.target, GenericNotificationTarget):
return NotificationTargetType.GENERIC
else:
raise NotificationTargetError(f"Unknown target type: {type(self.target)}")
def to_dict(self) -> dict[str, Any]:
return {
"type": self.notification_type,
"target": self.target.to_dict(),
}
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "NotificationTargetDto":
target_type = data["type"]
target_data = data["target"]
if target_type == NotificationTargetType.GENERIC:
target = GenericNotificationTarget.from_dict(target_data)
elif target_type == NotificationTargetType.INTEGRATION:
target = IntegrationNotificationTarget.from_dict(target_data)
else:
raise NotificationTargetError(f"Unknown target type: {target_type}")
return cls(target=target)
| NotificationTargetDto |
python | docker__docker-py | tests/integration/api_client_test.py | {
"start": 144,
"end": 478
} | class ____(BaseAPIIntegrationTest):
def test_version(self):
res = self.client.version()
assert 'GoVersion' in res
assert 'Version' in res
def test_info(self):
res = self.client.info()
assert 'Containers' in res
assert 'Images' in res
assert 'Debug' in res
| InformationTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP049_1.py | {
"start": 121,
"end": 164
} | class ____[_T = int]:
var: _T
# tuple
| Foo |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/component_tree.py | {
"start": 20458,
"end": 21781
} | class ____(ComponentTree):
"""Variant of ComponentTree that is used for testing purposes. Mocks out the
definitions module name and path.
"""
@staticmethod
def for_test() -> "TestComponentTree":
"""Convenience method for creating a ComponentTree for testing purposes."""
return TestComponentTree(
defs_module=mock.Mock(),
project_root=Path.cwd(),
)
@property
def defs_module_name(self) -> str:
return "test"
@property
def defs_module_path(self) -> Path:
return Path.cwd()
@property
def decl_load_context(self):
return ComponentDeclLoadContext(
component_path=ComponentPath.from_path(self.defs_module_path),
project_root=self.project_root,
defs_module_path=self.defs_module_path,
defs_module_name=self.defs_module_name,
resolution_context=ResolutionContext.default(),
terminate_autoloading_on_keyword_files=True,
component_tree=self,
)
@property
def load_context(self):
component_decl = mock.Mock()
component_decl.iterate_child_component_decls = mock.Mock(return_value=[])
return ComponentLoadContext.from_decl_load_context(self.decl_load_context, component_decl)
| TestComponentTree |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 76103,
"end": 78823
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
cran: Optional[RCranLibrary] = Field(
None, description="If cran, specification of a CRAN library to be installed."
)
egg: Optional[str] = Field(
None,
description=(
"If egg, URI of the egg to be installed. DBFS and S3 URIs are supported."
' For example: `{ "egg": "dbfs:/my/egg" }` or `{ "egg":'
' "s3://my-bucket/egg" }`. If S3 is used, make sure the cluster has read'
" access on the library. You may need to launch the cluster with an"
" instance profile to access the S3 URI."
),
examples=["dbfs:/my/egg"],
)
jar: Optional[str] = Field(
None,
description=(
"If jar, URI of the JAR to be installed. DBFS and S3 URIs are supported."
' For example: `{ "jar": "dbfs:/mnt/databricks/library.jar" }` or `{ "jar":'
' "s3://my-bucket/library.jar" }`. If S3 is used, make sure the cluster has'
" read access on the library. You may need to launch the cluster with an"
" instance profile to access the S3 URI."
),
examples=["dbfs:/my-jar.jar"],
)
maven: Optional[MavenLibrary] = Field(
None,
description=(
"If maven, specification of a Maven library to be installed. For example:"
' `{ "coordinates": "org.jsoup:jsoup:1.7.2" }`'
),
)
pypi: Optional[PythonPyPiLibrary] = Field(
None,
description=(
"If pypi, specification of a PyPI library to be installed. Specifying the"
" `repo` field is optional and if not specified, the default pip index is"
' used. For example: `{ "package": "simplejson", "repo":'
' "https://my-repo.com" }`'
),
)
whl: Optional[str] = Field(
None,
description=(
"If whl, URI of the wheel or zipped wheels to be installed. DBFS and S3"
' URIs are supported. For example: `{ "whl": "dbfs:/my/whl" }` or `{ "whl":'
' "s3://my-bucket/whl" }`. If S3 is used, make sure the cluster has read'
" access on the library. You may need to launch the cluster with an"
" instance profile to access the S3 URI. Also the wheel file name needs to"
" use the [correct"
" convention](https://www.python.org/dev/peps/pep-0427/#file-format). If"
" zipped wheels are to be installed, the file name suffix should be"
" `.wheelhouse.zip`."
),
examples=["dbfs:/my/whl"],
)
| Library |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator.py | {
"start": 52139,
"end": 62340
} | class ____(type_spec.BatchableTypeSpec):
"""A tf.TypeSpec for `LinearOperator` objects."""
__slots__ = ("_param_specs", "_non_tensor_params", "_prefer_static_fields")
def __init__(self, param_specs, non_tensor_params, prefer_static_fields):
"""Initializes a new `_LinearOperatorSpec`.
Args:
param_specs: Python `dict` of `tf.TypeSpec` instances that describe
kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or
`CompositeTensor` subclasses.
non_tensor_params: Python `dict` containing non-`Tensor` and non-
`CompositeTensor` kwargs to the `LinearOperator`'s constructor.
prefer_static_fields: Python `tuple` of strings corresponding to the names
of `Tensor`-like args to the `LinearOperator`s constructor that may be
stored as static values, if known. These are typically shapes, indices,
or axis values.
"""
self._param_specs = param_specs
self._non_tensor_params = non_tensor_params
self._prefer_static_fields = prefer_static_fields
@classmethod
def from_operator(cls, operator):
"""Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.
Args:
operator: An instance of `LinearOperator`.
Returns:
linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as
the `TypeSpec` of `operator`.
"""
validation_fields = ("is_non_singular", "is_self_adjoint",
"is_positive_definite", "is_square")
kwargs = _extract_attrs(
operator,
keys=set(operator._composite_tensor_fields + validation_fields)) # pylint: disable=protected-access
non_tensor_params = {}
param_specs = {}
for k, v in list(kwargs.items()):
type_spec_or_v = _extract_type_spec_recursively(v)
is_tensor = [isinstance(x, type_spec.TypeSpec)
for x in nest.flatten(type_spec_or_v)]
if all(is_tensor):
param_specs[k] = type_spec_or_v
elif not any(is_tensor):
non_tensor_params[k] = v
else:
raise NotImplementedError(f"Field {k} contains a mix of `Tensor` and "
f" non-`Tensor` values.")
return cls(
param_specs=param_specs,
non_tensor_params=non_tensor_params,
prefer_static_fields=operator._composite_tensor_prefer_static_fields) # pylint: disable=protected-access
def _to_components(self, obj):
return _extract_attrs(obj, keys=list(self._param_specs))
def _from_components(self, components):
kwargs = dict(self._non_tensor_params, **components)
return self.value_type(**kwargs)
@property
def _component_specs(self):
return self._param_specs
def _serialize(self):
return (self._param_specs,
self._non_tensor_params,
self._prefer_static_fields)
def _copy(self, **overrides):
kwargs = {
"param_specs": self._param_specs,
"non_tensor_params": self._non_tensor_params,
"prefer_static_fields": self._prefer_static_fields
}
kwargs.update(overrides)
return type(self)(**kwargs)
def _batch(self, batch_size):
"""Returns a TypeSpec representing a batch of objects with this TypeSpec."""
return self._copy(
param_specs=nest.map_structure(
lambda spec: spec._batch(batch_size), # pylint: disable=protected-access
self._param_specs))
def _unbatch(self, batch_size):
"""Returns a TypeSpec representing a single element of this TypeSpec."""
return self._copy(
param_specs=nest.map_structure(
lambda spec: spec._unbatch(), # pylint: disable=protected-access
self._param_specs))
def make_composite_tensor(cls, module_name="tf.linalg"):
"""Class decorator to convert `LinearOperator`s to `CompositeTensor`."""
spec_name = "{}Spec".format(cls.__name__)
spec_type = type(spec_name, (_LinearOperatorSpec,), {"value_type": cls})
type_spec_registry.register("{}.{}".format(module_name, spec_name))(spec_type)
cls._type_spec = property(spec_type.from_operator) # pylint: disable=protected-access
return cls
def _extract_attrs(op, keys):
"""Extract constructor kwargs to reconstruct `op`.
Args:
op: A `LinearOperator` instance.
keys: A Python `tuple` of strings indicating the names of the constructor
kwargs to extract from `op`.
Returns:
kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.
"""
kwargs = {}
not_found = object()
for k in keys:
srcs = [
getattr(op, k, not_found), getattr(op, "_" + k, not_found),
getattr(op, "parameters", {}).get(k, not_found),
]
if any(v is not not_found for v in srcs):
kwargs[k] = [v for v in srcs if v is not not_found][0]
else:
raise ValueError(
f"Could not determine an appropriate value for field `{k}` in object "
f" `{op}`. Looked for \n"
f" 1. an attr called `{k}`,\n"
f" 2. an attr called `_{k}`,\n"
f" 3. an entry in `op.parameters` with key '{k}'.")
if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None: # pylint: disable=protected-access
if tensor_util.is_tensor(kwargs[k]):
static_val = tensor_util.constant_value(kwargs[k])
if static_val is not None:
kwargs[k] = static_val
if isinstance(kwargs[k], (np.ndarray, np.generic)):
kwargs[k] = kwargs[k].tolist()
return kwargs
def _extract_type_spec_recursively(value):
"""Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.
If `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If
`value` is a collection containing `Tensor` values, recursively supplant them
with their respective `TypeSpec`s in a collection of parallel stucture.
If `value` is none of the above, return it unchanged.
Args:
value: a Python `object` to (possibly) turn into a (collection of)
`tf.TypeSpec`(s).
Returns:
spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`
or `value`, if no `Tensor`s are found.
"""
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
if isinstance(value, variables.Variable):
return resource_variable_ops.VariableSpec(
value.shape, dtype=value.dtype, trainable=value.trainable)
if tensor_util.is_tensor(value):
return tensor_spec.TensorSpec(value.shape, value.dtype)
# Unwrap trackable data structures to comply with `Type_Spec._serialize`
# requirements. `ListWrapper`s are converted to `list`s, and for other
# trackable data structures, the `__wrapped__` attribute is used.
if isinstance(value, list):
return list(_extract_type_spec_recursively(v) for v in value)
if isinstance(value, data_structures.TrackableDataStructure):
return _extract_type_spec_recursively(value.__wrapped__)
if isinstance(value, tuple):
return type(value)(_extract_type_spec_recursively(x) for x in value)
if isinstance(value, dict):
return type(value)((k, _extract_type_spec_recursively(v))
for k, v in value.items())
return value
# Overrides for tf.linalg functions. This allows a LinearOperator to be used in
# place of a Tensor.
# For instance tf.trace(linop) and linop.trace() both work.
@dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)
def _adjoint(matrix, name=None):
return matrix.adjoint(name)
@dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)
def _cholesky(input, name=None): # pylint:disable=redefined-builtin
return input.cholesky(name)
# The signature has to match with the one in python/op/array_ops.py,
# so we have k, padding_value, and align even though we don't use them here.
# pylint:disable=unused-argument
@dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)
def _diag_part(
input, # pylint:disable=redefined-builtin
name="diag_part",
k=0,
padding_value=0,
align="RIGHT_LEFT"):
return input.diag_part(name)
# pylint:enable=unused-argument
@dispatch.dispatch_for_types(linalg.det, LinearOperator)
def _det(input, name=None): # pylint:disable=redefined-builtin
return input.determinant(name)
@dispatch.dispatch_for_types(linalg.inv, LinearOperator)
def _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin
inv = input.inverse(name)
if adjoint:
inv = inv.adjoint()
return inv
@dispatch.dispatch_for_types(linalg.logdet, LinearOperator)
def _logdet(matrix, name=None):
if matrix.is_positive_definite and matrix.is_self_adjoint:
return matrix.log_abs_determinant(name)
raise ValueError("Expected matrix to be self-adjoint positive definite.")
@dispatch.dispatch_for_types(math_ops.matmul, LinearOperator)
def _matmul( # pylint:disable=missing-docstring
a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
output_type=None, # pylint: disable=unused-argument
grad_a=False, # pylint: disable=unused-argument
grad_b=False, # pylint: disable=unused-argument
name=None,
):
if transpose_a or transpose_b:
raise ValueError("Transposing not supported at this time.")
if a_is_sparse or b_is_sparse:
raise ValueError("Sparse methods not supported at this time.")
if not isinstance(a, LinearOperator):
# We use the identity (B^HA^H)^H = AB
adjoint_matmul = b.matmul(
a,
adjoint=(not adjoint_b),
adjoint_arg=(not adjoint_a),
name=name)
return linalg.adjoint(adjoint_matmul)
return a.matmul(
b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)
@dispatch.dispatch_for_types(linalg.solve, LinearOperator)
def _solve(
matrix,
rhs,
adjoint=False,
name=None):
if not isinstance(matrix, LinearOperator):
raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a "
"LinearOperator is not supported.")
return matrix.solve(rhs, adjoint=adjoint, name=name)
@dispatch.dispatch_for_types(linalg.trace, LinearOperator)
def _trace(x, name=None):
return x.trace(name)
| _LinearOperatorSpec |
python | keras-team__keras | keras/src/ops/core_test.py | {
"start": 49222,
"end": 56070
} | class ____(testing.TestCase):
def test_associative_scan_invalid_arguments(self):
# varying dimension at scan axis
x = (np.array([1, 2]), np.array([3, 4]), np.array([5, 6, 7]))
with self.assertRaisesRegex(ValueError, " first dimension"):
core.associative_scan(lambda x, y: (x[0] + y[0], x[1] + y[1]), x)
# same error, symbolic
x = (
KerasTensor((None, 5)),
KerasTensor((None, 4)),
)
with self.assertRaisesRegex(ValueError, " first dimension"):
core.associative_scan(
lambda x, y: (x[0] + y[0], x[1] + y[1]), x, axis=1
)
def test_cond_check_output_spec(self):
mock_spec = Mock(dtype="float32", shape=(2, 2))
mock_spec_different = Mock(dtype="int32", shape=(3, 3))
# List & tuple.
self.assertTrue(
core.Cond()._check_output_spec(
[mock_spec, mock_spec], [mock_spec, mock_spec]
)
)
self.assertTrue(
core.Cond()._check_output_spec([mock_spec], [mock_spec])
)
self.assertFalse(
core.Cond()._check_output_spec(
[mock_spec], [mock_spec, mock_spec_different]
)
)
self.assertTrue(
core.Cond()._check_output_spec((mock_spec,), (mock_spec,))
)
self.assertFalse(
core.Cond()._check_output_spec(
(mock_spec,), (mock_spec, mock_spec_different)
)
)
# Dict.
self.assertTrue(
core.Cond()._check_output_spec({"a": mock_spec}, {"a": mock_spec})
)
self.assertFalse(
core.Cond()._check_output_spec({"a": mock_spec}, {"b": mock_spec})
)
self.assertFalse(
core.Cond()._check_output_spec(
{"a": mock_spec}, {"a": mock_spec, "b": mock_spec}
)
)
# None.
self.assertTrue(core.Cond()._check_output_spec(None, None))
self.assertFalse(
core.Cond()._check_output_spec(
None, Mock(dtype="float32", shape=(2, 2))
)
)
self.assertFalse(
core.Cond()._check_output_spec(
Mock(dtype="float32", shape=(2, 2)), None
)
)
# KerasTensor.
mock_spec1 = KerasTensor(shape=(2, 2), dtype="float32")
mock_spec2 = KerasTensor(shape=(2, 2), dtype="float32")
self.assertTrue(core.Cond()._check_output_spec(mock_spec1, mock_spec2))
@pytest.mark.requires_trainable_backend
def test_cond_raw_bool_compile(self):
class ExampleLayer(layers.Layer):
def call(self, x, training=False):
return ops.cond(training, lambda: x, lambda: x * 2.0)
model = models.Sequential([ExampleLayer()])
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
x = np.ones((2, 4), dtype="float32")
y = np.zeros((2, 4), dtype="float32")
model.evaluate(x, y, batch_size=2)
def test_convert_to_numpy(self):
x = ops.array([1, 2, 3], dtype="float32")
y = ops.convert_to_numpy(x)
self.assertIsInstance(y, np.ndarray)
# Test assignment -- should not fail.
y[0] = 1.0
with self.assertRaises(ValueError):
ops.convert_to_numpy(KerasTensor((2,)))
def test_scan_invalid_arguments(self):
def cumsum(carry, xs):
carry = carry + xs
return carry, carry
init = np.array(0, dtype="float32")
xs = np.array([1, 2, 3, 4, 10, 20], dtype="float32")
# Test non-callable
with self.assertRaisesRegex(TypeError, "should be a callable."):
core.scan(123, init, xs)
# Test bad unroll
with self.assertRaisesRegex(
ValueError, "must be an positive integer or boolean."
):
core.scan(cumsum, init, xs, unroll=-1)
# Test both xs and length are None
with self.assertRaisesRegex(ValueError, "to scan over and"):
core.scan(cumsum, init, xs=None, length=None)
def test_slice_compute_output_spec(self):
inputs = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
start_indices = np.array([1, 1])
shape = (2, 2)
output_spec = core.Slice(shape).compute_output_spec(
inputs, start_indices
)
self.assertEqual(output_spec.shape, shape)
self.assertEqual(output_spec.dtype, inputs.dtype)
def test_stop_gradient_compute_output_spec(self):
variable = KerasTensor(shape=(3,), dtype="float32")
stop_gradient = core.StopGradient()
output_spec = stop_gradient.compute_output_spec(variable)
self.assertEqual(output_spec.shape, variable.shape)
self.assertEqual(output_spec.dtype, variable.dtype)
def test_vectorized_map_serialization(self):
@object_registration.register_keras_serializable()
def f(x):
return x + x
inputs = input_layer.Input((10,), dtype="float32")
outputs = core.vectorized_map(f, inputs)
model = models.Functional(inputs, outputs)
reloaded_model = model.from_config(model.get_config())
x = np.random.rand(5, 10).astype("float32")
self.assertAllClose(model(x), reloaded_model(x))
def test_while_loop_output_spec(self):
# Define dummy cond and body functions
def cond(x):
return True
def body(x):
return (x,)
while_loop = core.WhileLoop(cond, body, maximum_iterations=None)
loop_vars = (KerasTensor(shape=(10,), dtype="float32"),)
output_spec = while_loop.compute_output_spec(loop_vars)
self.assertEqual(output_spec[0].shape, loop_vars[0].shape)
self.assertEqual(output_spec[0].dtype, loop_vars[0].dtype)
# Test with KerasTensor.
loop_vars = (np.random.rand(5, 5), np.random.randint(10, size=(3, 7)))
keras_loop_vars = [
KerasTensor(v.shape, dtype=v.dtype) for v in loop_vars
]
while_loop = core.WhileLoop(cond, body, maximum_iterations=None)
output_specs = while_loop.compute_output_spec(keras_loop_vars)
self.assertEqual(output_specs[0].shape, keras_loop_vars[0].shape)
self.assertEqual(output_specs[0].dtype, keras_loop_vars[0].dtype)
self.assertEqual(output_specs[1].shape, keras_loop_vars[1].shape)
self.assertEqual(output_specs[1].dtype, keras_loop_vars[1].dtype)
def test_unstack_unknown_axis_num(self):
x = KerasTensor((2, None, None))
axis = 1
with self.assertRaisesRegex(
ValueError, r"Cannot infer argument `num` from shape"
):
core.unstack(x, axis=axis)
| CoreOpsBehaviorTests |
python | doocs__leetcode | solution/1400-1499/1434.Number of Ways to Wear Different Hats to Each Other/Solution.py | {
"start": 0,
"end": 622
} | class ____:
def numberWays(self, hats: List[List[int]]) -> int:
g = defaultdict(list)
for i, h in enumerate(hats):
for v in h:
g[v].append(i)
mod = 10**9 + 7
n = len(hats)
m = max(max(h) for h in hats)
f = [[0] * (1 << n) for _ in range(m + 1)]
f[0][0] = 1
for i in range(1, m + 1):
for j in range(1 << n):
f[i][j] = f[i - 1][j]
for k in g[i]:
if j >> k & 1:
f[i][j] = (f[i][j] + f[i - 1][j ^ (1 << k)]) % mod
return f[m][-1]
| Solution |
python | walkccc__LeetCode | solutions/2249. Count Lattice Points Inside a Circle/2249.py | {
"start": 0,
"end": 231
} | class ____:
def countLatticePoints(self, circles: list[list[int]]) -> int:
return sum(any((xc - x)**2 + (yc - y)**2 <= r**2 for xc, yc, r in circles)
for x in range(201)
for y in range(201))
| Solution |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_searchstrategy.py | {
"start": 5239,
"end": 5956
} | class ____:
a: Any
b: Any
@pytest.mark.parametrize(
"obj, value",
[
(recursive_list, ["[...]"]),
(recursive_dict, {"a": "{...}"}),
(mutual1, [["[...]"]]),
(mutual2, [["[...]"]]),
# same id object in different fields. no cycle
(A(a=shared, b=shared), {"a": "shared", "b": "shared"}),
(A(a=recursive_list, b=recursive_dict), {"a": ["[...]"], "b": {"a": "{...}"}}),
],
)
def test_to_jsonable_handles_reference_cycles(obj, value):
assert to_jsonable(obj, avoid_realization=False) == value
def test_deferred_strategy_draw():
strategy = st.deferred(lambda: st.integers())
assert strategy.do_draw(ConjectureData.for_choices([0])) == 0
| A |
python | getsentry__sentry | tests/sentry/api/bases/test_organization.py | {
"start": 14835,
"end": 23156
} | class ____(BaseOrganizationEndpointTest):
def setUp(self) -> None:
self.team_1 = self.create_team(organization=self.org)
self.team_2 = self.create_team(organization=self.org)
self.team_3 = self.create_team(organization=self.org)
self.create_team_membership(user=self.member, team=self.team_2)
self.project_1 = self.create_project(
organization=self.org, teams=[self.team_1, self.team_3], slug="foo"
)
self.project_2 = self.create_project(
organization=self.org, teams=[self.team_2, self.team_3], slug="bar"
)
def run_test(
self,
expected_projects,
user=None,
project_ids=None,
include_all_accessible=False,
active_superuser=False,
):
request_args = {}
if project_ids:
request_args["project"] = project_ids
result = self.endpoint.get_projects(
self.build_request(user=user, active_superuser=active_superuser, **request_args),
self.org,
include_all_accessible=include_all_accessible,
)
assert {p.id for p in expected_projects} == {p.id for p in result}
def test_no_ids_no_teams(self) -> None:
# Should get nothing if not part of the org
self.run_test([])
# Should get everything if super user
self.run_test([self.project_1, self.project_2], user=self.user, active_superuser=True)
# owner does not see projects they aren't members of if not included in query params
self.run_test([], user=self.owner)
# owner sees projects they have access to if they're included as query params
self.run_test(
[self.project_1, self.project_2],
user=self.owner,
project_ids=[self.project_1.id, self.project_2.id],
)
# Should get everything if org is public and ids are specified
self.org.flags.allow_joinleave = True
self.org.save()
self.run_test(
[self.project_1, self.project_2],
user=self.member,
project_ids=[self.project_1.id, self.project_2.id],
)
self.run_test([], include_all_accessible=False)
def test_no_ids_teams(self) -> None:
membership = self.create_team_membership(user=self.user, team=self.team_1)
self.run_test([self.project_1])
membership.delete()
self.create_team_membership(user=self.user, team=self.team_3)
self.run_test([self.project_1, self.project_2])
def test_ids_no_teams(self) -> None:
with pytest.raises(PermissionDenied):
self.run_test([], project_ids=[self.project_1.id])
self.run_test(
[self.project_1], user=self.user, project_ids=[self.project_1.id], active_superuser=True
)
# owner should see project if they explicitly request it, even if the don't
# have membership
self.run_test([self.project_1], user=self.owner, project_ids=[self.project_1.id])
self.org.flags.allow_joinleave = True
self.org.save()
self.run_test([self.project_1], user=self.member, project_ids=[self.project_1.id])
self.org.flags.allow_joinleave = False
self.org.save()
with pytest.raises(PermissionDenied):
self.run_test([self.project_1], user=self.member, project_ids=[self.project_1.id])
def test_ids_teams(self) -> None:
membership = self.create_team_membership(user=self.user, team=self.team_1)
self.run_test([self.project_1], project_ids=[self.project_1.id])
with pytest.raises(PermissionDenied):
self.run_test([], project_ids=[self.project_2.id])
membership.delete()
self.create_team_membership(user=self.user, team=self.team_3)
self.run_test(
[self.project_1, self.project_2], project_ids=[self.project_1.id, self.project_2.id]
)
def test_none_user(self) -> None:
request = RequestFactory().get("/")
request.session = SessionBase()
request.access = NoAccess()
request.auth = None
result = self.endpoint.get_projects(request, self.org)
assert [] == result
request.user = AnonymousUser()
result = self.endpoint.get_projects(request, self.org)
assert [] == result
def test_all_accessible_sigil_value_no_open_join(self) -> None:
assert self.org.flags.allow_joinleave.number == 0, "precondition not met"
self.create_team_membership(user=self.user, team=self.team_1)
self.run_test([self.project_1], project_ids=[-1])
def test_all_accessible_sigil_value_allow_joinleave(self) -> None:
self.org.flags.allow_joinleave = True
self.org.save()
# With membership on only one team you get all projects
self.create_team_membership(user=self.user, team=self.team_1)
self.run_test([self.project_1, self.project_2], project_ids=[-1])
@mock.patch(
"sentry.api.bases.organization.OrganizationEndpoint._filter_projects_by_permissions"
)
@mock.patch(
"sentry.api.bases.organization.OrganizationEndpoint.get_requested_project_ids_unchecked"
)
def test_get_projects_no_slug_fallsback_to_ids(
self, mock_get_project_ids_unchecked, mock__filter_projects_by_permissions
):
project_slugs = [""]
request = self.build_request(projectSlug=project_slugs)
mock_get_project_ids_unchecked.return_value = {self.project_1.id}
def side_effect(
projects,
**kwargs,
):
return projects
mock__filter_projects_by_permissions.side_effect = side_effect
self.endpoint.get_projects(
request,
self.org,
)
mock_get_project_ids_unchecked.assert_called_with(request)
mock__filter_projects_by_permissions.assert_called_with(
projects=[self.project_1],
request=request,
filter_by_membership=False,
force_global_perms=False,
include_all_accessible=False,
)
@mock.patch(
"sentry.api.bases.organization.OrganizationEndpoint._filter_projects_by_permissions"
)
def test_get_projects_by_slugs(
self, mock__filter_projects_by_permissions: mock.MagicMock
) -> None:
project_slugs = [self.project_1.slug]
request = self.build_request(projectSlug=project_slugs)
def side_effect(
projects,
**kwargs,
):
return projects
mock__filter_projects_by_permissions.side_effect = side_effect
self.endpoint.get_projects(
request,
self.org,
)
mock__filter_projects_by_permissions.assert_called_with(
projects=[self.project_1],
request=request,
filter_by_membership=False,
force_global_perms=False,
include_all_accessible=False,
)
@mock.patch(
"sentry.api.bases.organization.OrganizationEndpoint._filter_projects_by_permissions"
)
def test_get_projects_by_slugs_all(
self, mock__filter_projects_by_permissions: mock.MagicMock
) -> None:
project_slugs = ALL_ACCESS_PROJECTS_SLUG
request = self.build_request(projectSlug=project_slugs)
def side_effect(
projects,
**kwargs,
):
return projects
mock__filter_projects_by_permissions.side_effect = side_effect
response = self.endpoint.get_projects(
request,
self.org,
)
mock__filter_projects_by_permissions.assert_called_with(
projects=[self.project_1, self.project_2],
request=request,
filter_by_membership=False,
force_global_perms=False,
include_all_accessible=True,
)
assert len(response) == 2
assert self.project_1 in response
assert self.project_2 in response
def test_get_projects_by_slugs_no_projects_with_slug(self) -> None:
project_slugs = ["hello"]
request = self.build_request(projectSlug=project_slugs)
with pytest.raises(PermissionDenied):
self.endpoint.get_projects(request, self.org)
| GetProjectIdsTest |
python | sympy__sympy | sympy/combinatorics/perm_groups.py | {
"start": 983,
"end": 179285
} | class ____(Basic):
r"""The class defining a Permutation group.
Explanation
===========
``PermutationGroup([p1, p2, ..., pn])`` returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics import Polyhedron
The permutations corresponding to motion of the front, right and
bottom face of a $2 \times 2$ Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the $2 \times 2$ Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
.. [2] Seress, A.
"Permutation Group Algorithms"
.. [3] https://en.wikipedia.org/wiki/Schreier_vector
.. [4] https://en.wikipedia.org/wiki/Nielsen_transformation#Product_replacement_algorithm
.. [5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
.. [6] https://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
.. [7] https://algorithmist.com/wiki/Union_find
.. [8] https://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
.. [9] https://en.wikipedia.org/wiki/Center_%28group_theory%29
.. [10] https://en.wikipedia.org/wiki/Centralizer_and_normalizer
.. [11] https://groupprops.subwiki.org/wiki/Derived_subgroup
.. [12] https://en.wikipedia.org/wiki/Nilpotent_group
.. [13] https://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
.. [14] https://docs.gap-system.org/doc/ref/manual.pdf
"""
is_group = True
def __new__(cls, *args, dups=True, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is ``False``.
"""
if not args:
args = [Permutation()]
else:
args = list(args[0] if is_sequence(args[0]) else args)
if not args:
args = [Permutation()]
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if dups:
args = list(uniq([_af_new(list(a)) for a in args]))
if len(args) > 1:
args = [g for g in args if not g.is_identity]
return Basic.__new__(cls, *args, **kwargs)
def __init__(self, *args, **kwargs):
self._generators = list(self.args)
self._order = None
self._elements = []
self._center = None
self._is_abelian = None
self._is_transitive = None
self._is_sym = None
self._is_alt = None
self._is_primitive = None
self._is_nilpotent = None
self._is_solvable = None
self._is_trivial = None
self._transitivity_degree = None
self._max_div = None
self._is_perfect = None
self._is_cyclic = None
self._is_dihedral = None
self._r = len(self._generators)
self._degree = self._generators[0].size
# these attributes are assigned after running schreier_sims
self._base = []
self._strong_gens = []
self._strong_gens_slp = []
self._basic_orbits = []
self._transversals = []
self._transversal_slp = []
# these attributes are assigned after running _random_pr_init
self._random_gens = []
# finite presentation of the group as an instance of `FpGroup`
self._fp_presentation = None
def __getitem__(self, i):
return self._generators[i]
def __contains__(self, i):
"""Return ``True`` if *i* is contained in PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = Permutation(1, 2, 3)
>>> Permutation(3) in PermutationGroup(p)
True
"""
if not isinstance(i, Permutation):
raise TypeError("A PermutationGroup contains only Permutations as "
"elements, not elements of type %s" % type(i))
return self.contains(i)
def __len__(self):
return len(self._generators)
def equals(self, other):
"""Return ``True`` if PermutationGroup generated by elements in the
group are same i.e they represent the same PermutationGroup.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G.equals(H)
True
"""
if not isinstance(other, PermutationGroup):
return False
set_self_gens = set(self.generators)
set_other_gens = set(other.generators)
# before reaching the general case there are also certain
# optimisation and obvious cases requiring less or no actual
# computation.
if set_self_gens == set_other_gens:
return True
# in the most general case it will check that each generator of
# one group belongs to the other PermutationGroup and vice-versa
for gen1 in set_self_gens:
if not other.contains(gen1):
return False
for gen2 in set_other_gens:
if not self.contains(gen2):
return False
return True
def __mul__(self, other):
"""
Return the direct product of two permutation groups as a permutation
group.
Explanation
===========
This implementation realizes the direct product by shifting the index
set for the generators of the second group: so if we have ``G`` acting
on ``n1`` points and ``H`` acting on ``n2`` points, ``G*H`` acts on
``n1 + n2`` points.
Examples
========
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
(9)(0 1 2 3 4),
(5 6 7 8 9)])
>>> H.order()
25
"""
if isinstance(other, Permutation):
return Coset(other, self, dir='+')
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
Explanation
===========
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group `G` with a set of generators
`S`. For the initialization ``_random_pr_init``, a list ``R`` of
`\max\{r, |S|\}` group generators is created as the attribute
``G._random_gens``, repeating elements of `S` if necessary, and the
identity element of `G` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of `G` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from `\{gh, g(~h), hg, (~h)g\}`. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across `G` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] https://algorithmist.com/wiki/Union_find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Explanation
===========
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
.. [1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
.. [7] https://algorithmist.com/wiki/Union_find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
@property
def base(self):
r"""Return a base from the Schreier-Sims algorithm.
Explanation
===========
For a permutation group `G`, a base is a sequence of points
`B = (b_1, b_2, \dots, b_k)` such that no element of `G` apart
from the identity fixes all the points in `B`. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of `B` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
Explanation
===========
If a base for a group `G` is given by `(b_1, b_2, \dots, b_k)`, this
function returns a base `(b_1, b_2, \dots, b_{i+1}, b_i, \dots, b_k)`,
where `i` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[(0 1 2 3), (3)(0 1), (1 3 2),
(2 3), (1 3)])
check that base, gens is a BSGS
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
`|\beta_{i+1}^{\left\langle T\right\rangle}|` should be replaced by
`|\beta_{i}^{\left\langle T\right\rangle}|`, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
//len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out member of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
r"""
Return the basic orbits relative to a base and strong generating set.
Explanation
===========
If `(b_1, b_2, \dots, b_k)` is a base for a group `G`, and
`G^{(i)} = G_{b_1, b_2, \dots, b_{i-1}}` is the ``i``-th basic stabilizer
(so that `G^{(1)} = G`), the ``i``-th basic orbit relative to this base
is the orbit of `b_i` under `G^{(i)}`. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
r"""
Return a chain of stabilizers relative to a base and strong generating
set.
Explanation
===========
The ``i``-th basic stabilizer `G^{(i)}` relative to a base
`(b_1, b_2, \dots, b_k)` is `G_{b_1, b_2, \dots, b_{i-1}}`. For more
information, see [1], pp. 87-89.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
(3)(0 1 2),
(1 2 3)])
PermutationGroup([
(1 2 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
if not base: # e.g. if self is trivial
return []
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
Explanation
===========
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}]
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def composition_series(self):
r"""
Return the composition series for a group as a list
of permutation groups.
Explanation
===========
The composition series for a group `G` is defined as a
subnormal series `G = H_0 > H_1 > H_2 \ldots` A composition
series is a subnormal series such that each factor group
`H(i+1) / H(i)` is simple.
A subnormal series is a composition series only if it is of
maximum length.
The algorithm works as follows:
Starting with the derived series the idea is to fill
the gap between `G = der[i]` and `H = der[i+1]` for each
`i` independently. Since, all subgroups of the abelian group
`G/H` are normal so, first step is to take the generators
`g` of `G` and add them to generators of `H` one by one.
The factor groups formed are not simple in general. Each
group is obtained from the previous one by adding one
generator `g`, if the previous group is denoted by `H`
then the next group `K` is generated by `g` and `H`.
The factor group `K/H` is cyclic and it's order is
`K.order()//G.order()`. The series is then extended between
`K` and `H` by groups generated by powers of `g` and `H`.
The series formed is then prepended to the already existing
series.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> S = SymmetricGroup(12)
>>> G = S.sylow_subgroup(2)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
>>> G = S.sylow_subgroup(3)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[243, 81, 27, 9, 3, 1]
>>> G = CyclicGroup(12)
>>> C = G.composition_series()
>>> [H.order() for H in C]
[12, 6, 3, 1]
"""
der = self.derived_series()
if not all(g.is_identity for g in der[-1].generators):
raise NotImplementedError('Group should be solvable')
series = []
for i in range(len(der)-1):
H = der[i+1]
up_seg = []
for g in der[i].generators:
K = PermutationGroup([g] + H.generators)
order = K.order() // H.order()
down_seg = []
for p, e in factorint(order).items():
for _ in range(e):
down_seg.append(PermutationGroup([g] + H.generators))
g = g**p
up_seg = down_seg + up_seg
H = K
up_seg[0] = der[i]
series.extend(up_seg)
series.append(der[-1])
return series
def coset_transversal(self, H):
"""Return a transversal of the right cosets of self by its subgroup H
using the second method described in [1], Subsection 4.6.7
"""
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
if H.order() == 1:
return self.elements
self._schreier_sims(base=H.base) # make G.base an extension of H.base
base = self.base
base_ordering = _base_ordering(base, self.degree)
identity = Permutation(self.degree - 1)
transversals = self.basic_transversals[:]
# transversals is a list of dictionaries. Get rid of the keys
# so that it is a list of lists and sort each list in
# the increasing order of base[l]^x
for l, t in enumerate(transversals):
transversals[l] = sorted(t.values(),
key = lambda x: base_ordering[base[l]^x])
orbits = H.basic_orbits
h_stabs = H.basic_stabilizers
g_stabs = self.basic_stabilizers
indices = [x.order()//y.order() for x, y in zip(g_stabs, h_stabs)]
# T^(l) should be a right transversal of H^(l) in G^(l) for
# 1<=l<=len(base). While H^(l) is the trivial group, T^(l)
# contains all the elements of G^(l) so we might just as well
# start with l = len(h_stabs)-1
if len(g_stabs) > len(h_stabs):
T = g_stabs[len(h_stabs)].elements
else:
T = [identity]
l = len(h_stabs)-1
t_len = len(T)
while l > -1:
T_next = []
for u in transversals[l]:
if u == identity:
continue
b = base_ordering[base[l]^u]
for t in T:
p = t*u
if all(base_ordering[h^p] >= b for h in orbits[l]):
T_next.append(p)
if t_len + len(T_next) == indices[l]:
break
if t_len + len(T_next) == indices[l]:
break
T += T_next
t_len += len(T_next)
l -= 1
T.remove(identity)
T = [identity] + T
return T
def _coset_representative(self, g, H):
"""Return the representative of Hg from the transversal that
would be computed by ``self.coset_transversal(H)``.
"""
if H.order() == 1:
return g
# The base of self must be an extension of H.base.
if not(self.base[:len(H.base)] == H.base):
self._schreier_sims(base=H.base)
orbits = H.basic_orbits[:]
h_transversals = [list(_.values()) for _ in H.basic_transversals]
transversals = [list(_.values()) for _ in self.basic_transversals]
base = self.base
base_ordering = _base_ordering(base, self.degree)
def step(l, x):
gamma = min(orbits[l], key = lambda y: base_ordering[y^x])
i = [base[l]^h for h in h_transversals[l]].index(gamma)
x = h_transversals[l][i]*x
if l < len(orbits)-1:
for u in transversals[l]:
if base[l]^u == base[l]^x:
break
x = step(l+1, x*u**-1)*u
return x
return step(0, g)
def coset_table(self, H):
"""Return the standardised (right) coset table of self in H as
a list of lists.
"""
# Maybe this should be made to return an instance of CosetTable
# from fp_groups.py but the class would need to be changed first
# to be compatible with PermutationGroups
if not H.is_subgroup(self):
raise ValueError("The argument must be a subgroup")
T = self.coset_transversal(H)
n = len(T)
A = list(chain.from_iterable((gen, gen**-1)
for gen in self.generators))
table = []
for i in range(n):
row = [self._coset_representative(T[i]*x, H) for x in A]
row = [T.index(r) for r in row]
table.append(row)
# standardize (this is the same as the algorithm used in coset_table)
# If CosetTable is made compatible with PermutationGroups, this
# should be replaced by table.standardize()
A = range(len(A))
gamma = 1
for alpha, a in product(range(n), A):
beta = table[alpha][a]
if beta >= gamma:
if beta > gamma:
for x in A:
z = table[gamma][x]
table[gamma][x] = table[beta][x]
table[beta][x] = z
for i in range(n):
if table[i][x] == beta:
table[i][x] = gamma
elif table[i][x] == gamma:
table[i][x] = beta
gamma += 1
if gamma >= n-1:
return table
def center(self):
r"""
Return the center of a permutation group.
Explanation
===========
The center for a group `G` is defined as
`Z(G) = \{z\in G | \forall g\in G, zg = gz \}`,
the set of elements of `G` that commute with all elements of `G`.
It is equal to the centralizer of `G` inside `G`, and is naturally a
subgroup of `G` ([9]).
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
if not self._center:
self._center = self.centralizer(self)
return self._center
def centralizer(self, other):
    r"""
    Return the centralizer of a group/set/element.

    Explanation
    ===========

    The centralizer of a set of permutations ``S`` inside
    a group ``G`` is the set of elements of ``G`` that commute with all
    elements of ``S``::

    `C_G(S) = \{ g \in G | gs = sg \forall s \in S\}` ([10])

    Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
    the full symmetric group, we allow for ``S`` to have elements outside
    ``G``.

    It is naturally a subgroup of ``G``; the centralizer of a permutation
    group is equal to the centralizer of any set of generators for that
    group, since any element commuting with the generators commutes with
    any product of the generators.

    Parameters
    ==========

    other
        a permutation group/list of permutations/single permutation

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... CyclicGroup)
    >>> S = SymmetricGroup(6)
    >>> C = CyclicGroup(6)
    >>> H = S.centralizer(C)
    >>> H.is_subgroup(C)
    True

    See Also
    ========

    subgroup_search

    Notes
    =====

    The implementation is an application of ``.subgroup_search()`` with
    tests using a specific base for the group ``G``.
    """
    if hasattr(other, 'generators'):
        # other is a PermutationGroup
        if other.is_trivial or self.is_trivial:
            return self
        degree = self.degree
        identity = _af_new(list(range(degree)))
        orbits = other.orbits()
        num_orbits = len(orbits)
        # process the largest orbits first
        orbits.sort(key=lambda x: -len(x))
        long_base = []
        orbit_reps = [None]*num_orbits
        orbit_reps_indices = [None]*num_orbits
        # orbit_descr[point] = index of the orbit of ``other`` containing it
        orbit_descr = [None]*degree
        for i in range(num_orbits):
            orbit = list(orbits[i])
            orbit_reps[i] = orbit[0]
            orbit_reps_indices[i] = len(long_base)
            for point in orbit:
                orbit_descr[point] = i
            long_base = long_base + orbit
        base, strong_gens = self.schreier_sims_incremental(base=long_base)
        strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
        # truncate the base at the first redundant point (trivial
        # stabilizer on the remaining points)
        i = 0
        for i in range(len(base)):
            if strong_gens_distr[i] == [identity]:
                break
        base = base[:i]
        base_len = i
        for j in range(num_orbits):
            if base[base_len - 1] in orbits[j]:
                break
        rel_orbits = orbits[: j + 1]
        num_rel_orbits = len(rel_orbits)
        transversals = [None]*num_rel_orbits
        for j in range(num_rel_orbits):
            rep = orbit_reps[j]
            transversals[j] = dict(
                other.orbit_transversal(rep, pairs=True))
        trivial_test = lambda x: True
        tests = [None]*base_len
        for l in range(base_len):
            if base[l] in orbit_reps:
                tests[l] = trivial_test
            else:
                def test(computed_words, l=l):
                    g = computed_words[l]
                    rep_orb_index = orbit_descr[base[l]]
                    rep = orbit_reps[rep_orb_index]
                    im = g._array_form[base[l]]
                    im_rep = g._array_form[rep]
                    tr_el = transversals[rep_orb_index][base[l]]
                    # using the definition of transversal,
                    # base[l]^g = rep^(tr_el*g);
                    # if g belongs to the centralizer, then
                    # base[l]^g = (rep^g)^tr_el
                    return im == tr_el._array_form[im_rep]
                tests[l] = test

        def prop(g):
            # g centralizes ``other`` iff it commutes with every generator
            return [rmul(g, gen) for gen in other.generators] == \
                   [rmul(gen, g) for gen in other.generators]
        return self.subgroup_search(prop, base=base,
                                    strong_gens=strong_gens, tests=tests)
    elif hasattr(other, '__getitem__'):
        # other is a list/tuple of permutations
        gens = list(other)
        return self.centralizer(PermutationGroup(gens))
    elif hasattr(other, 'array_form'):
        # other is a single Permutation
        return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
    """
    Return the commutator of two subgroups.

    Explanation
    ===========

    For subgroups ``G`` and ``H`` of this group ``K``, the commutator is
    the group generated by all elements `[g, h] = hgh^{-1}g^{-1}` with
    ``g`` in ``G`` and ``h`` in ``H``.  It is a subgroup of ``K``
    ([1], p.27), and equals the normal closure of the commutators of the
    generators alone ([1], p.28), which is what is computed here.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... AlternatingGroup)
    >>> S = SymmetricGroup(5)
    >>> A = AlternatingGroup(5)
    >>> G = S.commutator(S, A)
    >>> G.is_subgroup(A)
    True

    See Also
    ========

    derived_subgroup
    """
    # collect the distinct commutators of generator pairs
    seen = []
    for g in G.generators:
        for h in H.generators:
            c = rmul(h, g, ~h, ~g)
            if c not in seen:
                seen.append(c)
    # the commutator subgroup is their normal closure
    return self.normal_closure(seen)
def coset_factor(self, g, factor_index=False):
    """Return ``G``'s (self's) coset factorization of ``g``

    Explanation
    ===========

    If ``g`` is an element of ``G`` then it can be written as the product
    of permutations drawn from the Schreier-Sims coset decomposition,

    The permutations returned in ``f`` are those for which
    the product gives ``g``: ``g = f[n]*...f[1]*f[0]`` where ``n = len(B)``
    and ``B = G.base``. f[i] is one of the permutations in
    ``self._basic_orbits[i]``.

    If factor_index==True,
    returns a tuple ``[b[0],..,b[n]]``, where ``b[i]``
    belongs to ``self._basic_orbits[i]``

    An empty list is returned when ``g`` is not an element of ``G``.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
    >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
    >>> G = PermutationGroup([a, b])

    Define g:

    >>> g = Permutation(7)(1, 2, 4)(3, 6, 5)

    Confirm that it is an element of G:

    >>> G.contains(g)
    True

    Thus, it can be written as a product of factors (up to
    3) drawn from u. See below that a factor from u1 and u2
    and the Identity permutation have been used:

    >>> f = G.coset_factor(g)
    >>> f[2]*f[1]*f[0] == g
    True
    >>> f1 = G.coset_factor(g, True); f1
    [0, 4, 4]
    >>> tr = G.basic_transversals
    >>> f[0] == tr[0][f1[0]]
    True

    If g is not an element of G then [] is returned:

    >>> c = Permutation(5, 6, 7)
    >>> G.coset_factor(c)
    []

    See Also
    ========

    sympy.combinatorics.util._strip
    """
    if isinstance(g, (Cycle, Permutation)):
        g = g.list()
    if len(g) != self._degree:
        # this could either adjust the size or return [] immediately
        # but we don't choose between the two and just signal a possible
        # error
        raise ValueError('g should be the same size as permutations of G')
    I = list(range(self._degree))
    basic_orbits = self.basic_orbits
    transversals = self._transversals
    factors = []
    base = self.base
    h = g
    # strip ``g`` down the stabilizer chain: at level i, divide off the
    # transversal element moving base[i] to its image under h
    for i in range(len(base)):
        beta = h[base[i]]
        if beta == base[i]:
            # base point already fixed; identity factor at this level
            factors.append(beta)
            continue
        if beta not in basic_orbits[i]:
            # image outside the basic orbit: g is not in G
            return []
        u = transversals[i][beta]._array_form
        h = _af_rmul(_af_invert(u), h)
        factors.append(beta)
    if h != I:
        # the residue must be the identity for membership
        return []
    if factor_index:
        return factors
    # translate orbit points into the actual transversal permutations
    tr = self.basic_transversals
    factors = [tr[i][factors[i]] for i in range(len(base))]
    return factors
def generator_product(self, g, original=False):
    r'''
    Return a list of strong generators `[s1, \dots, sn]`
    s.t `g = sn \times \dots \times s1`. If ``original=True``, make the
    list contain only the original group generators

    Notes
    =====

    Words for strong generators are looked up in ``self._strong_gens_slp``
    (straight-line programs) and expanded recursively when
    ``original=True``; otherwise the coset factorization of ``g`` is
    translated through ``self._transversal_slp``.
    '''
    product = []
    if g.is_identity:
        return []
    if g in self.strong_gens:
        # g is itself a strong generator
        if not original or g in self.generators:
            return [g]
        else:
            # expand its SLP word into original generators
            slp = self._strong_gens_slp[g]
            for s in slp:
                product.extend(self.generator_product(s, original=True))
            return product
    elif g**-1 in self.strong_gens:
        # the inverse is a strong generator; expand it and invert the word
        g = g**-1
        if not original or g in self.generators:
            return [g**-1]
        else:
            slp = self._strong_gens_slp[g]
            for s in slp:
                product.extend(self.generator_product(s, original=True))
            l = len(product)
            # reverse the word and invert each letter
            product = [product[l-i-1]**-1 for i in range(l)]
            return product
    # general case: factor g along the stabilizer chain and expand each
    # transversal element's SLP
    f = self.coset_factor(g, True)
    for i, j in enumerate(f):
        slp = self._transversal_slp[i][j]
        for s in slp:
            if not original:
                product.append(self.strong_gens[s])
            else:
                s = self.strong_gens[s]
                product.extend(self.generator_product(s, original=True))
    return product
def coset_rank(self, g):
    """rank using Schreier-Sims representation.

    Explanation
    ===========

    The coset rank of ``g`` is the ordering number in which
    it appears in the lexicographic listing according to the
    coset decomposition

    The ordering is the same as in G.generate(method='coset').
    If ``g`` does not belong to the group it returns None.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
    >>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
    >>> G = PermutationGroup([a, b])
    >>> c = Permutation(7)(2, 4)(3, 5)
    >>> G.coset_rank(c)
    16
    >>> G.coset_unrank(16)
    (7)(2 4)(3 5)

    See Also
    ========

    coset_factor
    """
    indices = self.coset_factor(g, True)
    if not indices:
        # g is not an element of the group
        return None
    # mixed-radix encoding of the factor indices along the chain
    rank = 0
    stride = 1
    for pos in range(len(self._base)):
        digit = self._basic_orbits[pos].index(indices[pos])
        rank += stride * digit
        stride *= len(self._transversals[pos])
    return rank
def coset_unrank(self, rank, af=False):
    """unrank using Schreier-Sims representation

    coset_unrank is the inverse operation of coset_rank
    if 0 <= rank < order; otherwise it returns None.
    """
    if not 0 <= rank < self.order():
        return None
    base = self.base
    transversals = self.basic_transversals
    basic_orbits = self.basic_orbits
    m = len(base)
    if m == 0:
        # trivial stabilizer chain: the group has only the identity
        h = list(range(self._degree))
        return h if af else _af_new(h)
    # decode ``rank`` as mixed-radix digits along the stabilizer chain
    points = [0]*m
    for i in range(m):
        rank, digit = divmod(rank, len(transversals[i]))
        points[i] = basic_orbits[i][digit]
    parts = [transversals[i][points[i]]._array_form for i in range(m)]
    h = _af_rmuln(*parts)
    return h if af else _af_new(h)
@property
def degree(self):
    """Return the size of the permutations in the group.

    Explanation
    ===========

    This is the number of points the group acts on.  It is distinct
    from ``len(group)`` (the number of stored generators) and from
    ``group.order()`` (the number of elements the group generates).

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a])
    >>> G.degree
    3
    >>> len(G)
    1
    >>> G.order()
    2
    >>> list(G.generate())
    [(2), (2)(0 1)]

    See Also
    ========

    order
    """
    return self._degree
@property
def identity(self):
    '''
    Return the identity element of the permutation group, i.e. the
    permutation of the group's degree fixing every point.
    '''
    return _af_new([*range(self.degree)])
@property
def elements(self):
    """Return all elements of the permutation group as a list.

    The full listing is generated on first access and cached; a shallow
    copy is returned so callers cannot mutate the cache.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> p = PermutationGroup(Permutation(1, 3), Permutation(1, 2))
    >>> p.elements
    [(3), (3)(1 2), (1 3), (2 3), (1 2 3), (1 3 2)]
    """
    if not self._elements:
        self._elements = list(self.generate())
    return list(self._elements)
def derived_series(self):
    r"""Return the derived series for the group.

    Explanation
    ===========

    The derived series is `G = G_0 > G_1 > G_2 > \ldots` where
    `G_i = [G_{i-1}, G_{i-1}]` is the derived subgroup of the previous
    term.  The computation stops as soon as two consecutive terms
    coincide (`G_k = G_{k-1}`).

    Returns
    =======

    A list of permutation groups containing the members of the derived
    series in the order `G = G_0, G_1, G_2, \ldots`.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... AlternatingGroup, DihedralGroup)
    >>> A = AlternatingGroup(5)
    >>> len(A.derived_series())
    1
    >>> S = SymmetricGroup(4)
    >>> len(S.derived_series())
    4
    >>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
    True
    >>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
    True

    See Also
    ========

    derived_subgroup
    """
    series = [self]
    term = self.derived_subgroup()
    # stop once the series stabilizes: the previous term contains its
    # own derived subgroup
    while not series[-1].is_subgroup(term):
        series.append(term)
        term = term.derived_subgroup()
    return series
def derived_subgroup(self):
    r"""Compute the derived subgroup.

    Explanation
    ===========

    The derived subgroup, or commutator subgroup is the subgroup generated
    by all commutators `[g, h] = hgh^{-1}g^{-1}` for `g, h\in G` ; it is
    equal to the normal closure of the set of commutators of the generators
    ([1], p.28, [11]).

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([1, 0, 2, 4, 3])
    >>> b = Permutation([0, 1, 3, 2, 4])
    >>> G = PermutationGroup([a, b])
    >>> C = G.derived_subgroup()
    >>> list(C.generate(af=True))
    [[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]

    See Also
    ========

    derived_series
    """
    r = self._r
    gens = [p._array_form for p in self.generators]
    set_commutators = set()
    degree = self._degree
    rng = list(range(degree))
    # compute the commutator of every ordered pair of generators in
    # array form: c = p2*p1*p2**-1*p1**-1 written point-wise
    for i in range(r):
        for j in range(r):
            p1 = gens[i]
            p2 = gens[j]
            c = list(range(degree))
            for k in rng:
                c[p2[p1[k]]] = p1[p2[k]]
            ct = tuple(c)
            if ct not in set_commutators:
                set_commutators.add(ct)
    cms = [_af_new(p) for p in set_commutators]
    # the derived subgroup is the normal closure of these commutators
    G2 = self.normal_closure(cms)
    return G2
def generate(self, method="coset", af=False):
    """Return iterator to generate the elements of the group.

    Parameters
    ==========

    method : str
        ``'coset'`` (default) iterates via the Schreier-Sims coset
        representation; ``'dimino'`` uses Dimino's method.
    af : bool
        If ``True``, yield the array form of the permutations.

    Examples
    ========

    >>> from sympy.combinatorics import PermutationGroup
    >>> from sympy.combinatorics.polyhedron import tetrahedron

    The permutation group given in the tetrahedron object is also
    true groups:

    >>> G = tetrahedron.pgroup
    >>> G.is_group
    True

    Also the group generated by the permutations in the tetrahedron
    pgroup -- even the first two -- is a proper group:

    >>> H = PermutationGroup(G[0], G[1])
    >>> J = PermutationGroup(list(H.generate())); J
    PermutationGroup([
        (0 1)(2 3),
        (1 2 3),
        (1 3 2),
        (0 3 1),
        (0 2 3),
        (0 3)(1 2),
        (0 1 3),
        (3)(0 2 1),
        (0 3 2),
        (3)(0 1 2),
        (0 2)(1 3)])
    >>> _.is_group
    True
    """
    if method == "coset":
        return self.generate_schreier_sims(af)
    if method == "dimino":
        return self.generate_dimino(af)
    raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
    """Yield group elements using Dimino's algorithm.

    If ``af == True`` it yields the array form of the permutations.

    As a side effect, ``self._order`` is set to the number of elements
    produced once iteration completes.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1, 3])
    >>> b = Permutation([0, 2, 3, 1])
    >>> g = PermutationGroup([a, b])
    >>> list(g.generate_dimino(af=True))
    [[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
     [0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]

    References
    ==========

    .. [1] The Implementation of Various Algorithms for Permutation Groups in
       the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
    """
    idn = list(range(self.degree))
    order = 0
    element_list = [idn]
    set_element_list = {tuple(idn)}
    if af:
        yield idn
    else:
        yield _af_new(idn)
    gens = [p._array_form for p in self.generators]
    # add one generator at a time, extending the subgroup generated so far
    for i in range(len(gens)):
        # D elements of the subgroup G_i generated by gens[:i]
        D = element_list.copy()
        # N holds coset representatives discovered in the previous round
        N = [idn]
        while N:
            A = N
            N = []
            for a in A:
                for g in gens[:i + 1]:
                    ag = _af_rmul(a, g)
                    if tuple(ag) not in set_element_list:
                        # produce G_i*g
                        for d in D:
                            order += 1
                            ap = _af_rmul(d, ag)
                            if af:
                                yield ap
                            else:
                                p = _af_new(ap)
                                yield p
                            element_list.append(ap)
                            set_element_list.add(tuple(ap))
                            N.append(ap)
    self._order = len(element_list)
def generate_schreier_sims(self, af=False):
    """Yield group elements using the Schreier-Sims representation
    in coset_rank order

    If ``af = True`` it yields the array form of the permutations

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1, 3])
    >>> b = Permutation([0, 2, 3, 1])
    >>> g = PermutationGroup([a, b])
    >>> list(g.generate_schreier_sims(af=True))
    [[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
     [0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
    """
    n = self._degree
    u = self.basic_transversals
    basic_orbits = self._basic_orbits
    if len(u) == 0:
        # empty stabilizer chain: just yield the stored generators
        for x in self.generators:
            if af:
                yield x._array_form
            else:
                yield x
        return
    if len(u) == 1:
        # single level: the transversal itself enumerates the group
        for i in basic_orbits[0]:
            if af:
                yield u[0][i]._array_form
            else:
                yield u[0][i]
        return

    # iterate products of transversal elements, deepest level first,
    # via an explicit backtracking stack
    u = list(reversed(u))
    basic_orbits = basic_orbits[::-1]
    # stg stack of group elements
    stg = [list(range(n))]
    posmax = [len(x) for x in u]
    n1 = len(posmax) - 1
    pos = [0]*n1
    h = 0
    while 1:
        # backtrack when finished iterating over coset
        if pos[h] >= posmax[h]:
            if h == 0:
                return
            pos[h] = 0
            h -= 1
            stg.pop()
            continue
        p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
        pos[h] += 1
        stg.append(p)
        h += 1
        if h == n1:
            # innermost level: emit one full coset, then backtrack
            if af:
                for i in basic_orbits[-1]:
                    p = _af_rmul(u[-1][i]._array_form, stg[-1])
                    yield p
            else:
                for i in basic_orbits[-1]:
                    p = _af_rmul(u[-1][i]._array_form, stg[-1])
                    p1 = _af_new(p)
                    yield p1
            stg.pop()
            h -= 1
@property
def generators(self):
    """Return the list of generators the group was constructed from.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a, b])
    >>> G.generators
    [(1 2), (2)(0 1)]
    """
    return self._generators
def contains(self, g, strict=True):
    """Test if permutation ``g`` belong to self, ``G``.

    Explanation
    ===========

    Membership is decided by attempting a coset factorization of ``g``
    along the Schreier-Sims stabilizer chain.  To see if ``g`` is one
    of the actual generators defining the group use ``G.has(g)``.

    If ``strict`` is not ``True``, ``g`` will be resized, if necessary,
    to match the size of permutations in ``self``.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation(1, 2)
    >>> b = Permutation(2, 3, 1)
    >>> G = PermutationGroup(a, b, degree=5)
    >>> G.contains(G[0]) # trivial check
    True
    >>> elem = Permutation([[2, 3]], size=5)
    >>> G.contains(elem)
    True
    >>> G.contains(Permutation(4)(0, 1, 2, 3))
    False

    If strict is False, a permutation will be resized, if
    necessary:

    >>> H = PermutationGroup(Permutation(5))
    >>> H.contains(Permutation(3))
    False
    >>> H.contains(Permutation(3), strict=False)
    True

    To test if a given permutation is present in the group:

    >>> elem in G.generators
    False
    >>> G.has(elem)
    False

    See Also
    ========

    coset_factor, sympy.core.basic.Basic.has, __contains__
    """
    # only permutations can be members
    if not isinstance(g, Permutation):
        return False
    if g.size != self.degree:
        if strict:
            return False
        # non-strict: resize to the group's degree before testing
        g = Permutation(g, size=self.degree)
    # generators are members by construction; otherwise try to factor g
    return g in self.generators or \
        bool(self.coset_factor(g.array_form, True))
@property
def is_perfect(self):
    """Return ``True`` if the group is perfect.

    A group is perfect if it equals its own derived subgroup.  The
    answer is computed once and cached.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation(1,2,3)(4,5)
    >>> b = Permutation(1,2,3,4,5)
    >>> G = PermutationGroup([a, b])
    >>> G.is_perfect
    False
    """
    if self._is_perfect is not None:
        return self._is_perfect
    self._is_perfect = self.equals(self.derived_subgroup())
    return self._is_perfect
@property
def is_abelian(self):
    """Test if the group is Abelian.

    It suffices to check that every pair of generators commutes; the
    answer is cached after the first computation.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a, b])
    >>> G.is_abelian
    False
    >>> a = Permutation([0, 2, 1])
    >>> G = PermutationGroup([a])
    >>> G.is_abelian
    True
    """
    if self._is_abelian is not None:
        return self._is_abelian
    forms = [p._array_form for p in self.generators]
    for u in forms:
        for v in forms:
            # check each unordered pair once; commutation is symmetric,
            # so pairs with v lexicographically <= u are skipped
            if v <= u:
                continue
            if not _af_commutes_with(u, v):
                self._is_abelian = False
                return False
    self._is_abelian = True
    return True
def abelian_invariants(self):
    """
    Returns the abelian invariants for the given group.
    Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to
    the direct product of finitely many nontrivial cyclic groups of
    prime-power order.

    Explanation
    ===========

    The prime-powers that occur as the orders of the factors are uniquely
    determined by G. More precisely, the primes that occur in the orders of the
    factors in any such decomposition of ``G`` are exactly the primes that divide
    ``|G|`` and for any such prime ``p``, if the orders of the factors that are
    p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``,
    then the orders of the factors that are p-groups in any such decomposition of ``G``
    are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``.

    The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken
    for all primes that divide ``|G|`` are called the invariants of the nontrivial
    group ``G`` as suggested in ([14], p. 542).

    Notes
    =====

    We adopt the convention that the invariants of a trivial group are [].

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a, b])
    >>> G.abelian_invariants()
    [2]
    >>> from sympy.combinatorics import CyclicGroup
    >>> G = CyclicGroup(7)
    >>> G.abelian_invariants()
    [7]
    """
    if self.is_trivial:
        return []
    gns = self.generators
    inv = []
    G = self
    H = G.derived_subgroup()
    Hgens = H.generators
    for p in primefactors(G.order()):
        ranks = []
        # repeatedly raise the generators to the p-th power modulo the
        # derived subgroup; the index drop at each step gives the rank
        # of one layer of the p-part of G/[G, G]
        while True:
            pows = []
            for g in gns:
                elm = g**p
                if not H.contains(elm):
                    pows.append(elm)
            K = PermutationGroup(Hgens + pows) if pows else H
            r = G.order()//K.order()
            G = K
            gns = pows
            if r == 1:
                break
            ranks.append(multiplicity(p, r))

        if ranks:
            # convert the layer ranks into the list of prime-power
            # invariants for this prime
            pows = [1]*ranks[0]
            for i in ranks:
                for j in range(i):
                    pows[j] = pows[j]*p
            inv.extend(pows)
    inv.sort()
    return inv
def is_elementary(self, p):
    """Return ``True`` if the group is elementary abelian. An elementary
    abelian group is a finite abelian group, where every nontrivial
    element has order `p`, where `p` is a prime.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([0, 2, 1])
    >>> G = PermutationGroup([a])
    >>> G.is_elementary(2)
    True
    >>> a = Permutation([0, 2, 1, 3])
    >>> b = Permutation([3, 1, 2, 0])
    >>> G = PermutationGroup([a, b])
    >>> G.is_elementary(2)
    True
    >>> G.is_elementary(3)
    False
    """
    if not self.is_abelian:
        return False
    # in an abelian group it suffices to check the generators
    return all(gen.order() == p for gen in self.generators)
def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False):
"""A naive test using the group order."""
if only_sym and only_alt:
raise ValueError(
"Both {} and {} cannot be set to True"
.format(only_sym, only_alt))
n = self.degree
sym_order = _factorial(n)
order = self.order()
if order == sym_order:
self._is_sym = True
self._is_alt = False
return not only_alt
if 2*order == sym_order:
self._is_sym = False
self._is_alt = True
return not only_sym
return False
def _eval_is_alt_sym_monte_carlo(self, eps=0.05, perms=None):
    """A test using monte-carlo algorithm.

    Parameters
    ==========

    eps : float, optional
        The criterion for the incorrect ``False`` return.

    perms : list[Permutation], optional
        If explicitly given, it tests over the given candidates
        for testing.

        If ``None``, it randomly computes ``N_eps`` and chooses
        ``N_eps`` sample of the permutation from the group.

    See Also
    ========

    _check_cycles_alt_sym
    """
    if perms is None:
        n = self.degree
        # constants c_n and the sample-size formula come from the
        # density estimate log(2)/log(n) of witnessing elements
        # (see is_alt_sym's Notes) -- presumably tuned empirically;
        # the threshold 17 separates the two constant regimes
        if n < 17:
            c_n = 0.34
        else:
            c_n = 0.57
        d_n = (c_n*log(2))/log(n)
        N_eps = int(-log(eps)/d_n)
        # sample N_eps pseudo-random elements and recurse on them
        perms = (self.random_pr() for i in range(N_eps))
        return self._eval_is_alt_sym_monte_carlo(perms=perms)

    # True is guaranteed correct; False may be wrong with probability eps
    for perm in perms:
        if _check_cycles_alt_sym(perm):
            return True
    return False
def is_alt_sym(self, eps=0.05, _random_prec=None):
    r"""Monte Carlo test for the symmetric/alternating group for degrees
    >= 8.

    Explanation
    ===========

    More specifically, it is one-sided Monte Carlo with the
    answer True (i.e., G is symmetric/alternating) guaranteed to be
    correct, and the answer False being incorrect with probability eps.

    For degree < 8, the order of the group is checked so the test
    is deterministic.

    Notes
    =====

    The algorithm itself uses some nontrivial results from group theory and
    number theory:
    1) If a transitive group ``G`` of degree ``n`` contains an element
    with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
    symmetric or alternating group ([1], pp. 81-82)
    2) The proportion of elements in the symmetric/alternating group having
    the property described in 1) is approximately `\log(2)/\log(n)`
    ([1], p.82; [2], pp. 226-227).
    The helper function ``_check_cycles_alt_sym`` is used to
    go over the cycles in a permutation and look for ones satisfying 1).

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> D = DihedralGroup(10)
    >>> D.is_alt_sym()
    False

    See Also
    ========

    _check_cycles_alt_sym
    """
    # testing hook: run the monte-carlo step on pre-computed elements
    if _random_prec is not None:
        N_eps = _random_prec['N_eps']
        perms= (_random_prec[i] for i in range(N_eps))
        return self._eval_is_alt_sym_monte_carlo(perms=perms)

    # consult the caches first
    if self._is_sym or self._is_alt:
        return True
    if self._is_sym is False and self._is_alt is False:
        return False

    n = self.degree
    if n < 8:
        # small degrees: deterministic order-based test
        return self._eval_is_alt_sym_naive()
    elif self.is_transitive():
        return self._eval_is_alt_sym_monte_carlo(eps=eps)

    # intransitive groups cannot be symmetric or alternating
    self._is_sym, self._is_alt = False, False
    return False
@property
def is_nilpotent(self):
    """Test if the group is nilpotent.

    Explanation
    ===========

    A group `G` is nilpotent if it has a central series of finite
    length; equivalently, its lower central series terminates with the
    trivial group.  Every nilpotent group is also solvable
    ([1], p.29, [12]).  The answer is cached after the first
    computation.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
    ... CyclicGroup)
    >>> C = CyclicGroup(6)
    >>> C.is_nilpotent
    True
    >>> S = SymmetricGroup(5)
    >>> S.is_nilpotent
    False

    See Also
    ========

    lower_central_series, is_solvable
    """
    if self._is_nilpotent is not None:
        return self._is_nilpotent
    lcs = self.lower_central_series()
    last_term = lcs[-1]
    e = _af_new(list(range(self.degree)))
    # nilpotent iff the series terminated at the trivial group
    if all(gen == e for gen in last_term.generators):
        # nilpotent implies solvable; cache both facts
        self._is_solvable = True
        self._is_nilpotent = True
        return True
    self._is_nilpotent = False
    return False
def is_normal(self, gr, strict=True):
    """Test if ``G=self`` is a normal subgroup of ``gr``.

    Explanation
    ===========

    G is normal in gr if
    for each g2 in G, g1 in gr, ``g = g1*g2*g1**-1`` belongs to G
    It is sufficient to check this for each g1 in gr.generators and
    g2 in G.generators.

    Parameters
    ==========

    gr : PermutationGroup
        The group in which normality is tested.
    strict : bool
        When ``False``, the smaller-degree group is extended so both
        act on the same number of points before testing.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> a = Permutation([1, 2, 0])
    >>> b = Permutation([1, 0, 2])
    >>> G = PermutationGroup([a, b])
    >>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
    >>> G1.is_normal(G)
    True
    """
    if not self.is_subgroup(gr, strict=strict):
        return False
    d_self = self.degree
    d_gr = gr.degree
    if self.is_trivial and (d_self == d_gr or not strict):
        return True
    # NOTE: a previous shortcut returned True whenever ``self`` was
    # cached as abelian, but an abelian subgroup need not be normal
    # (e.g. the subgroup generated by a single transposition in S3),
    # so every generator pair must be checked by the loop below.
    new_self = self.copy()
    if not strict and d_self != d_gr:
        # pad the smaller-degree group with an identity of the larger
        # degree so conjugation is well defined
        if d_self < d_gr:
            new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)])
        else:
            gr = PermGroup(gr.generators + [Permutation(d_self - 1)])
    gens2 = [p._array_form for p in new_self.generators]
    gens1 = [p._array_form for p in gr.generators]
    for g1 in gens1:
        for g2 in gens2:
            # conjugate each generator of self by each generator of gr
            p = _af_rmuln(g1, g2, _af_invert(g1))
            if not new_self.coset_factor(p, True):
                # conjugate falls outside self: not normal
                return False
    return True
def is_primitive(self, randomized=True):
    r"""Test if a group is primitive.

    Explanation
    ===========

    A permutation group ``G`` acting on a set ``S`` is called primitive if
    ``S`` contains no nontrivial block under the action of ``G``
    (a block is nontrivial if its cardinality is more than ``1``).

    Notes
    =====

    The algorithm is described in [1], p.83, and uses the function
    minimal_block to search for blocks of the form `\{0, k\}` for ``k``
    ranging over representatives for the orbits of `G_0`, the stabilizer of
    ``0``. This algorithm has complexity `O(n^2)` where ``n`` is the degree
    of the group, and will perform badly if `G_0` is small.

    There are two implementations offered: one finds `G_0`
    deterministically using the function ``stabilizer``, and the other
    (default) produces random elements of `G_0` using ``random_stab``,
    hoping that they generate a subgroup of `G_0` with not too many more
    orbits than `G_0` (this is suggested in [1], p.83). Behavior is changed
    by the ``randomized`` flag.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> D = DihedralGroup(10)
    >>> D.is_primitive()
    False

    See Also
    ========

    minimal_block, random_stab
    """
    # cached answer from a previous call
    if self._is_primitive is not None:
        return self._is_primitive

    # a primitive group is necessarily transitive
    if self.is_transitive() is False:
        return False

    if randomized:
        # approximate the stabilizer of 0 by random stabilizer elements
        random_stab_gens = []
        v = self.schreier_vector(0)
        for _ in range(len(self)):
            random_stab_gens.append(self.random_stab(0, v))
        stab = PermutationGroup(random_stab_gens)
    else:
        stab = self.stabilizer(0)
    orbits = stab.orbits()
    for orb in orbits:
        x = orb.pop()
        # a nontrivial block exists iff minimal_block([0, x]) is not
        # the all-zero (single-block) system for some orbit rep x
        if x != 0 and any(e != 0 for e in self.minimal_block([0, x])):
            self._is_primitive = False
            return False
    self._is_primitive = True
    return True
def minimal_blocks(self, randomized=True):
    '''
    For a transitive group, return the list of all minimal
    block systems. If a group is intransitive, return `False`.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> from sympy.combinatorics.named_groups import DihedralGroup
    >>> DihedralGroup(6).minimal_blocks()
    [[0, 1, 0, 1, 0, 1], [0, 1, 2, 0, 1, 2]]
    >>> G = PermutationGroup(Permutation(1,2,5))
    >>> G.minimal_blocks()
    False

    See Also
    ========

    minimal_block, is_transitive, is_primitive
    '''
    def _number_blocks(blocks):
        # number the blocks of a block system
        # in order and return the number of
        # blocks and the tuple with the
        # reordering
        n = len(blocks)
        appeared = {}
        m = 0
        b = [None]*n
        for i in range(n):
            if blocks[i] not in appeared:
                appeared[blocks[i]] = m
                b[i] = m
                m += 1
            else:
                b[i] = appeared[blocks[i]]
        return tuple(b), m

    if not self.is_transitive():
        return False
    blocks = []
    num_blocks = []
    rep_blocks = []
    if randomized:
        # approximate the stabilizer of 0 by random stabilizer elements
        random_stab_gens = []
        v = self.schreier_vector(0)
        for i in range(len(self)):
            random_stab_gens.append(self.random_stab(0, v))
        stab = PermutationGroup(random_stab_gens)
    else:
        stab = self.stabilizer(0)
    orbits = stab.orbits()
    for orb in orbits:
        x = orb.pop()
        if x != 0:
            block = self.minimal_block([0, x])
            num_block, _ = _number_blocks(block)
            # a representative block (containing 0)
            rep = {j for j in range(self.degree) if num_block[j] == 0}
            # check if the system is minimal with
            # respect to the already discovered ones
            minimal = True
            blocks_remove_mask = [False] * len(blocks)
            for i, r in enumerate(rep_blocks):
                if len(r) > len(rep) and rep.issubset(r):
                    # i-th block system is not minimal
                    blocks_remove_mask[i] = True
                elif len(r) < len(rep) and r.issubset(rep):
                    # the system being checked is not minimal
                    minimal = False
                    break
            # remove non-minimal representative blocks
            blocks = [b for i, b in enumerate(blocks) if not blocks_remove_mask[i]]
            num_blocks = [n for i, n in enumerate(num_blocks) if not blocks_remove_mask[i]]
            rep_blocks = [r for i, r in enumerate(rep_blocks) if not blocks_remove_mask[i]]

            if minimal and num_block not in num_blocks:
                blocks.append(block)
                num_blocks.append(num_block)
                rep_blocks.append(rep)
    return blocks
@property
def is_solvable(self):
    """Test if the group is solvable.

    ``G`` is solvable if its derived series terminates with the trivial
    group ([1], p.29).  Groups of odd order are solvable by the
    Feit-Thompson theorem, so no series is computed for them.  The
    answer is cached after the first computation.

    Examples
    ========

    >>> from sympy.combinatorics.named_groups import SymmetricGroup
    >>> S = SymmetricGroup(3)
    >>> S.is_solvable
    True

    See Also
    ========

    is_nilpotent, derived_series
    """
    if self._is_solvable is None:
        if self.order() % 2 != 0:
            # Feit-Thompson: every group of odd order is solvable.
            # Cache the answer; previously this branch skipped the
            # cache, so odd-order groups recomputed order() on every
            # access while all other paths memoized.
            self._is_solvable = True
            return True
        ds = self.derived_series()
        terminator = ds[len(ds) - 1]
        gens = terminator.generators
        degree = self.degree
        identity = _af_new(list(range(degree)))
        if all(g == identity for g in gens):
            self._is_solvable = True
            return True
        else:
            self._is_solvable = False
            return False
    else:
        return self._is_solvable
def is_subgroup(self, G, strict=True):
    """Return ``True`` if all elements of ``self`` belong to ``G``.

    If ``strict`` is ``False`` then if ``self``'s degree is smaller
    than ``G``'s, the elements will be resized to have the same degree.

    Examples
    ========

    >>> from sympy.combinatorics import Permutation, PermutationGroup
    >>> from sympy.combinatorics import SymmetricGroup, CyclicGroup

    Testing is strict by default: the degree of each group must be the
    same:

    >>> p = Permutation(0, 1, 2, 3, 4, 5)
    >>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
    >>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
    >>> G3 = PermutationGroup([p, p**2])
    >>> assert G1.order() == G2.order() == G3.order() == 6
    >>> G1.is_subgroup(G2)
    True
    >>> G1.is_subgroup(G3)
    False
    >>> G3.is_subgroup(PermutationGroup(G3[1]))
    False
    >>> G3.is_subgroup(PermutationGroup(G3[0]))
    True

    To ignore the size, set ``strict`` to ``False``:

    >>> S3 = SymmetricGroup(3)
    >>> S5 = SymmetricGroup(5)
    >>> S3.is_subgroup(S5, strict=False)
    True
    >>> C7 = CyclicGroup(7)
    >>> G = S5*C7
    >>> S5.is_subgroup(G, False)
    True
    >>> C7.is_subgroup(G, 0)
    False
    """
    # a SymmetricPermutationGroup of matching degree contains everything
    if isinstance(G, SymmetricPermutationGroup):
        if self.degree != G.degree:
            return False
        return True
    if not isinstance(G, PermutationGroup):
        return False
    # shortcut: self equals G, or self is the trivial group generated
    # by the identity permutation
    if self == G or self.generators[0]==Permutation():
        return True
    # Lagrange: a subgroup's order must divide the parent's order
    if G.order() % self.order() != 0:
        return False
    if self.degree == G.degree or \
            (self.degree < G.degree and not strict):
        gens = self.generators
    else:
        return False
    # self is a subgroup iff G contains every generator of self
    return all(G.contains(g, strict=strict) for g in gens)
@property
def is_polycyclic(self):
"""Return ``True`` if a group is polycyclic. A group is polycyclic if
it has a subnormal series with cyclic factors. For finite groups,
this is the same as if the group is solvable.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G = PermutationGroup([a, b])
>>> G.is_polycyclic
True
"""
return self.is_solvable
def is_transitive(self, strict=True):
"""Test if the group is transitive.
Explanation
===========
A group is transitive if it has a single orbit.
If ``strict`` is ``False`` the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group `G` is the series
`G = G_0 > G_1 > G_2 > \ldots` where
`G_k = [G, G_{k-1}]`, i.e. every term after the first is equal to the
commutator of `G` and the previous term in `G1` ([1], p.29).
Returns
=======
A list of permutation groups in the order `G = G_0, G_1, G_2, \ldots`
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
nxt = self.commutator(self, current)
while not current.is_subgroup(nxt):
res.append(nxt)
current = nxt
nxt = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Explanation
===========
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
    def minimal_block(self, points):
        r"""For a transitive group, finds the block system generated by
        ``points``.

        Explanation
        ===========

        If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
        is called a block under the action of ``G`` if for all ``g`` in ``G``
        we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
        common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).

        The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
        partition the set ``S`` and this set of translates is known as a block
        system. Moreover, we obviously have that all blocks in the partition
        have the same size, hence the block size divides ``|S|`` ([1], p.23).
        A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
        such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
        For a transitive group, the equivalence classes of a ``G``-congruence
        and the blocks of a block system are the same thing ([1], p.23).

        The algorithm below checks the group for transitivity, and then finds
        the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
        ..., (p_0,p_{k-1})`` which is the same as finding the maximal block
        system (i.e., the one with minimum block size) such that
        ``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).

        It is an implementation of Atkinson's algorithm, as suggested in [1],
        and manipulates an equivalence relation on the set ``S`` using a
        union-find data structure. The running time is just above
        `O(|points||S|)`. ([1], pp. 83-87; [7]).

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import DihedralGroup
        >>> D = DihedralGroup(10)
        >>> D.minimal_block([0, 5])
        [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
        >>> D.minimal_block([0, 1])
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

        See Also
        ========

        _union_find_rep, _union_find_merge, is_transitive, is_primitive

        """
        # The notion of a block system only applies to transitive groups.
        if not self.is_transitive():
            return False
        n = self.degree
        gens = self.generators
        # initialize the list of equivalence class representatives
        parents = list(range(n))
        ranks = [1]*n
        not_rep = []
        k = len(points)
        # the block size must divide the degree of the group
        if k > self.max_div:
            return [0]*n
        # Merge all the given points into one initial equivalence class,
        # rooted at points[0]; the remaining points join the work queue.
        for i in range(k - 1):
            parents[points[i + 1]] = points[0]
            not_rep.append(points[i + 1])
        ranks[points[0]] = k
        i = 0
        len_not_rep = k - 1
        # Process the queue of non-representatives: applying each generator
        # must respect the congruence, so merge the images of merged points.
        while i < len_not_rep:
            gamma = not_rep[i]
            i += 1
            for gen in gens:
                # find has side effects: performs path compression on the list
                # of representatives
                delta = self._union_find_rep(gamma, parents)
                # union has side effects: performs union by rank on the list
                # of representatives
                temp = self._union_find_merge(gen(gamma), gen(delta), ranks,
                                              parents, not_rep)
                # A return of -1 signals the blocks collapsed to one class;
                # the trivial block system is returned in that case.
                if temp == -1:
                    return [0]*n
                len_not_rep += temp
        for i in range(n):
            # force path compression to get the final state of the equivalence
            # relation
            self._union_find_rep(i, parents)
        # rewrite result so that block representatives are minimal
        new_reps = {}
        return [new_reps.setdefault(r, i) for i, r in enumerate(parents)]
def conjugacy_class(self, x):
r"""Return the conjugacy class of an element in the group.
Explanation
===========
The conjugacy class of an element ``g`` in a group ``G`` is the set of
elements ``x`` in ``G`` that are conjugate with ``g``, i.e. for which
``g = xax^{-1}``
for some ``a`` in ``G``.
Note that conjugacy is an equivalence relation, and therefore that
conjugacy classes are partitions of ``G``. For a list of all the
conjugacy classes of the group, use the conjugacy_classes() method.
In a permutation group, each conjugacy class corresponds to a particular
`cycle structure': for example, in ``S_3``, the conjugacy classes are:
* the identity class, ``{()}``
* all transpositions, ``{(1 2), (1 3), (2 3)}``
* all 3-cycles, ``{(1 2 3), (1 3 2)}``
Examples
========
>>> from sympy.combinatorics import Permutation, SymmetricGroup
>>> S3 = SymmetricGroup(3)
>>> S3.conjugacy_class(Permutation(0, 1, 2))
{(0 1 2), (0 2 1)}
Notes
=====
This procedure computes the conjugacy class directly by finding the
orbit of the element under conjugation in G. This algorithm is only
feasible for permutation groups of relatively small order, but is like
the orbit() function itself in that respect.
"""
# Ref: "Computing the conjugacy classes of finite groups"; Butler, G.
# Groups '93 Galway/St Andrews; edited by Campbell, C. M.
new_class = {x}
last_iteration = new_class
while len(last_iteration) > 0:
this_iteration = set()
for y in last_iteration:
for s in self.generators:
conjugated = s * y * (~s)
if conjugated not in new_class:
this_iteration.add(conjugated)
new_class.update(last_iteration)
last_iteration = this_iteration
return new_class
def conjugacy_classes(self):
r"""Return the conjugacy classes of the group.
Explanation
===========
As described in the documentation for the .conjugacy_class() function,
conjugacy is an equivalence relation on a group G which partitions the
set of elements. This method returns a list of all these conjugacy
classes of G.
Examples
========
>>> from sympy.combinatorics import SymmetricGroup
>>> SymmetricGroup(3).conjugacy_classes()
[{(2)}, {(0 1 2), (0 2 1)}, {(0 2), (1 2), (2)(0 1)}]
"""
identity = _af_new(list(range(self.degree)))
known_elements = {identity}
classes = [known_elements.copy()]
for x in self.generate():
if x not in known_elements:
new_class = self.conjugacy_class(x)
classes.append(new_class)
known_elements.update(new_class)
return classes
    def normal_closure(self, other, k=10):
        r"""Return the normal closure of a subgroup/set of permutations.

        Explanation
        ===========

        If ``S`` is a subset of a group ``G``, the normal closure of ``A`` in ``G``
        is defined as the intersection of all normal subgroups of ``G`` that
        contain ``A`` ([1], p.14). Alternatively, it is the group generated by
        the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
        generator of the subgroup ``\left\langle S\right\rangle`` generated by
        ``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
        ([1], p.73).

        Parameters
        ==========

        other
            a subgroup/list of permutations/single permutation
        k
            an implementation-specific parameter that determines the number
            of conjugates that are adjoined to ``other`` at once

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
        ... CyclicGroup, AlternatingGroup)
        >>> S = SymmetricGroup(5)
        >>> C = CyclicGroup(5)
        >>> G = S.normal_closure(C)
        >>> G.order()
        60
        >>> G.is_subgroup(AlternatingGroup(5))
        True

        See Also
        ========

        commutator, derived_subgroup, random_pr

        Notes
        =====

        The algorithm is described in [1], pp. 73-74; it makes use of the
        generation of random elements for permutation groups by the product
        replacement algorithm.
        """
        if hasattr(other, 'generators'):
            degree = self.degree
            identity = _af_new(list(range(degree)))
            # The normal closure of the trivial group is itself.
            if all(g == identity for g in other.generators):
                return other
            # Z is the candidate closure; start from <other> and grow it.
            Z = PermutationGroup(other.generators[:])
            base, strong_gens = Z.schreier_sims_incremental()
            strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
            basic_orbits, basic_transversals = \
                _orbits_transversals_from_bsgs(base, strong_gens_distr)
            self._random_pr_init(r=10, n=20)
            _loop = True
            while _loop:
                Z._random_pr_init(r=10, n=10)
                # Probabilistic phase: adjoin k random conjugates h^g that
                # fail to sift through Z's stabilizer chain.
                for _ in range(k):
                    g = self.random_pr()
                    h = Z.random_pr()
                    conj = h^g
                    res = _strip(conj, base, basic_orbits, basic_transversals)
                    # A residue different from the identity (or an early drop
                    # from the chain) means conj is not yet in Z.
                    if res[0] != identity or res[1] != len(base) + 1:
                        gens = Z.generators
                        gens.append(conj)
                        Z = PermutationGroup(gens)
                        strong_gens.append(conj)
                        temp_base, temp_strong_gens = \
                            Z.schreier_sims_incremental(base, strong_gens)
                        base, strong_gens = temp_base, temp_strong_gens
                        strong_gens_distr = \
                            _distribute_gens_by_base(base, strong_gens)
                        basic_orbits, basic_transversals = \
                            _orbits_transversals_from_bsgs(base,
                                                           strong_gens_distr)
                # Deterministic check: Z is normal iff every generator
                # conjugate h^g sifts to the identity; otherwise iterate.
                _loop = False
                for g in self.generators:
                    for h in Z.generators:
                        conj = h^g
                        res = _strip(conj, base, basic_orbits,
                                     basic_transversals)
                        if res[0] != identity or res[1] != len(base) + 1:
                            _loop = True
                            break
                    if _loop:
                        break
            return Z
        elif hasattr(other, '__getitem__'):
            # A list/tuple of permutations: wrap in a group and recurse.
            return self.normal_closure(PermutationGroup(other))
        elif hasattr(other, 'array_form'):
            # A single permutation: wrap in a one-generator group.
            return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a sets
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
{0, 1, 2}
>>> G.orbit([0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
    def orbit_rep(self, alpha, beta, schreier_vector=None):
        """Return a group element which sends ``alpha`` to ``beta``.

        Explanation
        ===========

        If ``beta`` is not in the orbit of ``alpha``, the function returns
        ``False``. This implementation makes use of the schreier vector.
        For a proof of correctness, see [1], p.80

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import AlternatingGroup
        >>> G = AlternatingGroup(5)
        >>> G.orbit_rep(0, 4)
        (0 4 1 2 3)

        See Also
        ========

        schreier_vector

        """
        if schreier_vector is None:
            schreier_vector = self.schreier_vector(alpha)
        # A None entry means beta is outside the orbit of alpha.
        if schreier_vector[beta] is None:
            return False
        k = schreier_vector[beta]
        gens = [x._array_form for x in self.generators]
        a = []
        # Walk the Schreier tree back from beta to alpha (entry -1 marks
        # the root), collecting the generator applied at each step.
        while k != -1:
            a.append(gens[k])
            beta = gens[k].index(beta)  # beta = (~gens[k])(beta)
            k = schreier_vector[beta]
        # The composed product of the collected generators maps alpha to
        # the original beta; an empty path means alpha == beta, so return
        # the identity.
        if a:
            return _af_new(_af_rmuln(*a))
        else:
            return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
For a permutation group `G`, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of ``self``, ordered according to lowest element
in each orbit.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[{0, 2, 3, 4, 6}, {1, 5}]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
The number of permutations comprising the group is given by
``len(group)``; the length of each permutation in the group is
given by ``group.size``.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
m = prod([len(x) for x in self.basic_transversals])
self._order = m
return m
def index(self, H):
"""
Returns the index of a permutation group.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation(1,2,3)
>>> b =Permutation(3)
>>> G = PermutationGroup([a])
>>> H = PermutationGroup([b])
>>> G.index(H)
3
"""
if H.is_subgroup(self):
return self.order()//H.order()
    @property
    def is_symmetric(self):
        """Return ``True`` if the group is symmetric.

        Examples
        ========

        >>> from sympy.combinatorics import SymmetricGroup
        >>> g = SymmetricGroup(5)
        >>> g.is_symmetric
        True

        >>> from sympy.combinatorics import Permutation, PermutationGroup
        >>> g = PermutationGroup(
        ...     Permutation(0, 1, 2, 3, 4),
        ...     Permutation(2, 3))
        >>> g.is_symmetric
        True

        Notes
        =====

        This uses a naive test involving the computation of the full
        group order.
        If you need more quicker taxonomy for large groups, you can use
        :meth:`PermutationGroup.is_alt_sym`.
        However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
        and is not able to distinguish between an alternating group and
        a symmetric group.

        See Also
        ========

        is_alt_sym
        """
        _is_sym = self._is_sym
        if _is_sym is not None:
            return _is_sym
        n = self.degree
        if n >= 8:
            if self.is_transitive():
                # Monte-Carlo test answers "alternating or symmetric?";
                # an odd generator then rules out the alternating group.
                _is_alt_sym = self._eval_is_alt_sym_monte_carlo()
                if _is_alt_sym:
                    if any(g.is_odd for g in self.generators):
                        self._is_sym, self._is_alt = True, False
                        return True

                    # All generators even: the group lies in Alt(n).
                    self._is_sym, self._is_alt = False, True
                    return False

                # Monte-Carlo said no: fall back to the naive order check.
                return self._eval_is_alt_sym_naive(only_sym=True)

            # An intransitive group can be neither symmetric nor alternating.
            self._is_sym, self._is_alt = False, False
            return False

        # Small degrees: the naive full-order test is cheap enough.
        return self._eval_is_alt_sym_naive(only_sym=True)
    @property
    def is_alternating(self):
        """Return ``True`` if the group is alternating.

        Examples
        ========

        >>> from sympy.combinatorics import AlternatingGroup
        >>> g = AlternatingGroup(5)
        >>> g.is_alternating
        True

        >>> from sympy.combinatorics import Permutation, PermutationGroup
        >>> g = PermutationGroup(
        ...     Permutation(0, 1, 2, 3, 4),
        ...     Permutation(2, 3, 4))
        >>> g.is_alternating
        True

        Notes
        =====

        This uses a naive test involving the computation of the full
        group order.
        If you need more quicker taxonomy for large groups, you can use
        :meth:`PermutationGroup.is_alt_sym`.
        However, :meth:`PermutationGroup.is_alt_sym` may not be accurate
        and is not able to distinguish between an alternating group and
        a symmetric group.

        See Also
        ========

        is_alt_sym
        """
        _is_alt = self._is_alt
        if _is_alt is not None:
            return _is_alt
        n = self.degree
        if n >= 8:
            if self.is_transitive():
                # Monte-Carlo test answers "alternating or symmetric?";
                # all-even generators then pin down the alternating group.
                _is_alt_sym = self._eval_is_alt_sym_monte_carlo()
                if _is_alt_sym:
                    if all(g.is_even for g in self.generators):
                        self._is_sym, self._is_alt = False, True
                        return True

                    # An odd generator means the group is the full Sym(n).
                    self._is_sym, self._is_alt = True, False
                    return False

                # Monte-Carlo said no: fall back to the naive order check.
                return self._eval_is_alt_sym_naive(only_alt=True)

            # An intransitive group can be neither symmetric nor alternating.
            self._is_sym, self._is_alt = False, False
            return False

        # Small degrees: the naive full-order test is cheap enough.
        return self._eval_is_alt_sym_naive(only_alt=True)
@classmethod
def _distinct_primes_lemma(cls, primes):
"""Subroutine to test if there is only one cyclic group for the
order."""
primes = sorted(primes)
l = len(primes)
for i in range(l):
for j in range(i+1, l):
if primes[j] % primes[i] == 1:
return None
return True
    @property
    def is_cyclic(self):
        r"""
        Return ``True`` if the group is Cyclic.

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import AbelianGroup
        >>> G = AbelianGroup(3, 4)
        >>> G.is_cyclic
        True
        >>> G = AbelianGroup(4, 4)
        >>> G.is_cyclic
        False

        Notes
        =====

        If the order of a group $n$ can be factored into the distinct
        primes $p_1, p_2, \dots , p_s$ and if

        .. math::
            \forall i, j \in \{1, 2, \dots, s \}:
            p_i \not \equiv 1 \pmod {p_j}

        holds true, there is only one group of the order $n$ which
        is a cyclic group [1]_. This is a generalization of the lemma
        that the group of order $15, 35, \dots$ are cyclic.

        And also, these additional lemmas can be used to test if a
        group is cyclic if the order of the group is already found.

        - If the group is abelian and the order of the group is
          square-free, the group is cyclic.
        - If the order of the group is less than $6$ and is not $4$, the
          group is cyclic.
        - If the order of the group is prime, the group is cyclic.

        References
        ==========

        .. [1] 1978: John S. Rose: A Course on Group Theory,
            Introduction to Finite Group Theory: 1.4
        """
        if self._is_cyclic is not None:
            return self._is_cyclic

        # A single generator makes the group cyclic (hence abelian) at once.
        if len(self.generators) == 1:
            self._is_cyclic = True
            self._is_abelian = True
            return True

        # Cyclic groups are abelian, so a known non-abelian group fails.
        if self._is_abelian is False:
            self._is_cyclic = False
            return False

        order = self.order()

        if order < 6:
            self._is_abelian = True
            # Orders 1, 2, 3 and 5 admit only the cyclic group; order 4
            # needs further analysis below (Klein four-group possible).
            if order != 4:
                self._is_cyclic = True
                return True

        factors = factorint(order)
        if all(v == 1 for v in factors.values()):
            # Square-free order: abelian implies cyclic, and the distinct
            # primes lemma can certify cyclicity without an abelian check.
            if self._is_abelian:
                self._is_cyclic = True
                return True

            primes = list(factors.keys())
            if PermutationGroup._distinct_primes_lemma(primes) is True:
                self._is_cyclic = True
                self._is_abelian = True
                return True

        if not self.is_abelian:
            self._is_cyclic = False
            return False

        # Abelian, non-square-free order: cyclic iff for each prime p with
        # p^2 | order some generator survives raising to order/p.
        self._is_cyclic = all(
            any(g**(order//p) != self.identity for g in self.generators)
            for p, e in factors.items() if e > 1
        )
        return self._is_cyclic
    @property
    def is_dihedral(self):
        r"""
        Return ``True`` if the group is dihedral.

        Examples
        ========

        >>> from sympy.combinatorics.perm_groups import PermutationGroup
        >>> from sympy.combinatorics.permutations import Permutation
        >>> from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup
        >>> G = PermutationGroup(Permutation(1, 6)(2, 5)(3, 4), Permutation(0, 1, 2, 3, 4, 5, 6))
        >>> G.is_dihedral
        True
        >>> G = SymmetricGroup(3)
        >>> G.is_dihedral
        True
        >>> G = CyclicGroup(6)
        >>> G.is_dihedral
        False

        References
        ==========

        .. [Di1] https://math.stackexchange.com/questions/827230/given-a-cayley-table-is-there-an-algorithm-to-determine-if-it-is-a-dihedral-gro/827273#827273
        .. [Di2] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral.pdf
        .. [Di3] https://kconrad.math.uconn.edu/blurbs/grouptheory/dihedral2.pdf
        .. [Di4] https://en.wikipedia.org/wiki/Dihedral_group
        """
        if self._is_dihedral is not None:
            return self._is_dihedral

        order = self.order()

        # Dihedral groups have even order.
        if order % 2 == 1:
            self._is_dihedral = False
            return False
        if order == 2:
            self._is_dihedral = True
            return True
        if order == 4:
            # The dihedral group of order 4 is the Klein 4-group.
            self._is_dihedral = not self.is_cyclic
            return self._is_dihedral
        if self.is_abelian:
            # The only abelian dihedral groups are the ones of orders 2 and 4.
            self._is_dihedral = False
            return False

        # Now we know the group is of even order >= 6, and nonabelian.
        n = order // 2

        # Handle special cases where there are exactly two generators.
        gens = self.generators
        if len(gens) == 2:
            x, y = gens
            a, b = x.order(), y.order()
            # Make a >= b
            if a < b:
                x, y, a, b = y, x, b, a
            # Using Theorem 2.1 of [Di3]:
            if a == 2 == b:
                self._is_dihedral = True
                return True
            # Using Theorem 1.1 of [Di3]:
            if a == n and b == 2 and y*x*y == ~x:
                self._is_dihedral = True
                return True

        # Proceed with algorithm of [Di1]
        # Find elements of orders 2 and n
        order_2, order_n = [], []
        for p in self.elements:
            k = p.order()
            if k == 2:
                order_2.append(p)
            elif k == n:
                order_n.append(p)

        # D_n has n reflections when n is odd, n + 1 involutions when even.
        if len(order_2) != n + 1 - (n % 2):
            self._is_dihedral = False
            return False

        # There must be a rotation of full order n.
        if not order_n:
            self._is_dihedral = False
            return False

        x = order_n[0]
        # Want an element y of order 2 that is not a power of x
        # (i.e. that is not the 180-deg rotation, when n is even).
        y = order_2[0]
        if n % 2 == 0 and y == x**(n//2):
            y = order_2[1]

        # Dihedral iff the candidate reflection inverts the rotation.
        self._is_dihedral = (y*x*y == ~x)
        return self._is_dihedral
    def pointwise_stabilizer(self, points, incremental=True):
        r"""Return the pointwise stabilizer for a set of points.

        Explanation
        ===========

        For a permutation group `G` and a set of points
        `\{p_1, p_2,\ldots, p_k\}`, the pointwise stabilizer of
        `p_1, p_2, \ldots, p_k` is defined as
        `G_{p_1,\ldots, p_k} =
        \{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}` ([1],p20).
        It is a subgroup of `G`.

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(7)
        >>> Stab = S.pointwise_stabilizer([2, 3, 5])
        >>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
        True

        See Also
        ========

        stabilizer, schreier_sims_incremental

        Notes
        =====

        When incremental == True,
        rather than the obvious implementation using successive calls to
        ``.stabilizer()``, this uses the incremental Schreier-Sims algorithm
        to obtain a base with starting segment - the given points.
        """
        if incremental:
            # With the points as the leading base segment, the strong
            # generators fixing all of them generate the pointwise stabilizer.
            base, strong_gens = self.schreier_sims_incremental(base=points)
            stab_gens = []
            degree = self.degree
            for gen in strong_gens:
                if [gen(point) for point in points] == points:
                    stab_gens.append(gen)
            if not stab_gens:
                # NOTE(review): this assigns a single identity Permutation
                # (not a list) as the generator argument; it appears to rely
                # on PermutationGroup accepting a bare permutation — confirm.
                stab_gens = _af_new(list(range(degree)))
            return PermutationGroup(stab_gens)
        else:
            # Naive route: peel off one stabilizer per point.
            gens = self._generators
            degree = self.degree
            for x in points:
                gens = _stabilizer(degree, gens, x)
            return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order give. This is mainly used for testing purposes.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
(0 1)(2 3)
>>> G.make_perm(3, [0, 1, 0])
(0 2 3 1)
>>> G.make_perm([0, 1, 0])
(0 2 3 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randomrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for _ in range(n):
p = self[randomrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
    def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
        """Return a random group element using product replacement.

        Explanation
        ===========

        For the details of the product replacement algorithm, see
        ``_random_pr_init`` In ``random_pr`` the actual 'product replacement'
        is performed. Notice that if the attribute ``_random_gens``
        is empty, it needs to be initialized by ``_random_pr_init``.

        See Also
        ========

        _random_pr_init
        """
        # Lazily initialize the pool of random generators.
        if self._random_gens == []:
            self._random_pr_init(gen_count, iterations)
        random_gens = self._random_gens
        r = len(random_gens) - 1

        # handle randomized input for testing purposes
        if _random_prec is None:
            # Pick two distinct slots s != t, a side x and an exponent e.
            s = randrange(r)
            t = randrange(r - 1)
            if t == s:
                t = r - 1
            x = choice([1, 2])
            e = choice([-1, 1])
        else:
            s = _random_prec['s']
            t = _random_prec['t']
            if t == s:
                t = r - 1
            x = _random_prec['x']
            e = _random_prec['e']

        # Replace slot s by its product with (slot t)^e on the chosen side,
        # and accumulate the result in the last slot (mutates the pool
        # in place; entries are permutation array forms).
        if x == 1:
            random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
            random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
        else:
            random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
            random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
        return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
Explanation
===========
It computes the generators of the chain of stabilizers
`G > G_{b_1} > .. > G_{b1,..,b_r} > 1`
in which `G_{b_1,..,b_i}` stabilizes `b_1,..,b_i`,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
`h_1*..*h_s`.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: (2)(0 1), 1: (2), 2: (1 2)},
{0: (2), 2: (0 2)}]
"""
if self._transversals:
return
self._schreier_sims()
return
    def _schreier_sims(self, base=None):
        # Compute and cache the group's BSGS data (base, strong generators,
        # basic orbits, transversals) plus straight-line programs (slp)
        # expressing transversal elements in terms of the strong generators.
        schreier = self.schreier_sims_incremental(base=base, slp_dict=True)
        base, strong_gens = schreier[:2]
        self._base = base
        self._strong_gens = strong_gens
        self._strong_gens_slp = schreier[2]
        # An empty base means the trivial group; no chain data to build.
        if not base:
            self._transversals = []
            self._basic_orbits = []
            return

        strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
        basic_orbits, transversals, slps = _orbits_transversals_from_bsgs(base,\
            strong_gens_distr, slp=True)

        # rewrite the indices stored in slps in terms of strong_gens
        for i, slp in enumerate(slps):
            gens = strong_gens_distr[i]
            for k in slp:
                slp[k] = [strong_gens.index(gens[s]) for s in slp[k]]
        self._transversals = transversals
        self._basic_orbits = [sorted(x) for x in basic_orbits]
        self._transversal_slp = slps
    def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False):
        """Extend a sequence of points and generating set to a base and strong
        generating set.

        Parameters
        ==========

        base
            The sequence of points to be extended to a base. Optional
            parameter with default value ``[]``.
        gens
            The generating set to be extended to a strong generating set
            relative to the base obtained. Optional parameter with default
            value ``self.generators``.

        slp_dict
            If `True`, return a dictionary `{g: gens}` for each strong
            generator `g` where `gens` is a list of strong generators
            coming before `g` in `strong_gens`, such that the product
            of the elements of `gens` is equal to `g`.

        Returns
        =======

        (base, strong_gens)
            ``base`` is the base obtained, and ``strong_gens`` is the strong
            generating set relative to it. The original parameters ``base``,
            ``gens`` remain unchanged.

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import AlternatingGroup
        >>> from sympy.combinatorics.testutil import _verify_bsgs
        >>> A = AlternatingGroup(7)
        >>> base = [2, 3]
        >>> seq = [2, 3]
        >>> base, strong_gens = A.schreier_sims_incremental(base=seq)
        >>> _verify_bsgs(A, base, strong_gens)
        True
        >>> base[:2]
        [2, 3]

        Notes
        =====

        This version of the Schreier-Sims algorithm runs in polynomial time.
        There are certain assumptions in the implementation - if the trivial
        group is provided, ``base`` and ``gens`` are returned immediately,
        as any sequence of points is a base for the trivial group. If the
        identity is present in the generators ``gens``, it is removed as
        it is a redundant generator.
        The implementation is described in [1], pp. 90-93.

        See Also
        ========

        schreier_sims, schreier_sims_random
        """
        if base is None:
            base = []
        if gens is None:
            gens = self.generators[:]
        degree = self.degree
        id_af = list(range(degree))
        # handle the trivial group
        if len(gens) == 1 and gens[0].is_Identity:
            if slp_dict:
                return base, gens, {gens[0]: [gens[0]]}
            return base, gens
        # prevent side effects
        _base, _gens = base[:], gens[:]
        # remove the identity as a generator
        _gens = [x for x in _gens if not x.is_Identity]
        # make sure no generator fixes all base points
        for gen in _gens:
            if all(x == gen._array_form[x] for x in _base):
                # extend the base with a point this generator moves
                for new in id_af:
                    if gen._array_form[new] != new:
                        break
                else:
                    assert None  # can this ever happen?
                _base.append(new)
        # distribute generators according to basic stabilizers
        strong_gens_distr = _distribute_gens_by_base(_base, _gens)
        strong_gens_slp = []
        # initialize the basic stabilizers, basic orbits and basic transversals
        orbs = {}
        transversals = {}
        slps = {}
        base_len = len(_base)
        for i in range(base_len):
            transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i],
                _base[i], pairs=True, af=True, slp=True)
            transversals[i] = dict(transversals[i])
            orbs[i] = list(transversals[i].keys())
        # main loop: amend the stabilizer chain until we have generators
        # for all stabilizers
        i = base_len - 1
        while i >= 0:
            # this flag is used to continue with the main loop from inside
            # a nested loop
            continue_i = False
            # test the generators for being a strong generating set
            db = {}
            # Check Schreier's condition at level i: for every orbit point
            # beta and generator gen, the Schreier generator u1^-1 * gen * u_beta
            # must sift through the chain below.
            for beta, u_beta in list(transversals[i].items()):
                for j, gen in enumerate(strong_gens_distr[i]):
                    gb = gen._array_form[beta]
                    u1 = transversals[i][gb]
                    g1 = _af_rmul(gen._array_form, u_beta)
                    slp = [(i, g) for g in slps[i][beta]]
                    slp = [(i, j)] + slp
                    if g1 != u1:
                        # test if the schreier generator is in the i+1-th
                        # would-be basic stabilizer
                        y = True
                        try:
                            # db caches inverses of transversal elements
                            u1_inv = db[gb]
                        except KeyError:
                            u1_inv = db[gb] = _af_invert(u1)
                        schreier_gen = _af_rmul(u1_inv, g1)
                        u1_inv_slp = slps[i][gb][:]
                        u1_inv_slp.reverse()
                        u1_inv_slp = [(i, (g,)) for g in u1_inv_slp]
                        slp = u1_inv_slp + slp
                        h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps)
                        if j <= base_len:
                            # new strong generator h at level j
                            y = False
                        elif h:
                            # h fixes all base points
                            y = False
                            # extend the base with a point h moves so that
                            # h shows up in some basic stabilizer
                            moved = 0
                            while h[moved] == moved:
                                moved += 1
                            _base.append(moved)
                            base_len += 1
                            strong_gens_distr.append([])
                        if y is False:
                            # if a new strong generator is found, update the
                            # data structures and start over
                            h = _af_new(h)
                            strong_gens_slp.append((h, slp))
                            # h belongs to every basic stabilizer between
                            # level i+1 and the level j where sifting failed
                            for l in range(i + 1, j):
                                strong_gens_distr[l].append(h)
                                transversals[l], slps[l] =\
                                    _orbit_transversal(degree, strong_gens_distr[l],
                                    _base[l], pairs=True, af=True, slp=True)
                                transversals[l] = dict(transversals[l])
                                orbs[l] = list(transversals[l].keys())
                            i = j - 1
                            # continue main loop using the flag
                            continue_i = True
                    if continue_i is True:
                        break
                if continue_i is True:
                    break
            if continue_i is True:
                continue
            i -= 1
        strong_gens = _gens[:]
        if slp_dict:
            # create the list of the strong generators strong_gens and
            # rewrite the indices of strong_gens_slp in terms of the
            # elements of strong_gens
            for k, slp in strong_gens_slp:
                strong_gens.append(k)
                for i in range(len(slp)):
                    s = slp[i]
                    if isinstance(s[1], tuple):
                        slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1
                    else:
                        slp[i] = strong_gens_distr[s[0]][s[1]]
            strong_gens_slp = dict(strong_gens_slp)
            # add the original generators
            for g in _gens:
                strong_gens_slp[g] = [g]
            return (_base, strong_gens, strong_gens_slp)

        strong_gens.extend([k for k, _ in strong_gens_slp])
        return _base, strong_gens
    def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
                             _random_prec=None):
        r"""Randomized Schreier-Sims algorithm.

        Explanation
        ===========

        The randomized Schreier-Sims algorithm takes the sequence ``base``
        and the generating set ``gens``, and extends ``base`` to a base, and
        ``gens`` to a strong generating set relative to that base with
        probability of a wrong answer at most `2^{-consec\_succ}`,
        provided the random generators are sufficiently random.

        Parameters
        ==========

        base
            The sequence to be extended to a base.
        gens
            The generating set to be extended to a strong generating set.
        consec_succ
            The parameter defining the probability of a wrong answer.
        _random_prec
            An internal parameter used for testing purposes.

        Returns
        =======

        (base, strong_gens)
            ``base`` is the base and ``strong_gens`` is the strong generating
            set relative to it.

        Examples
        ========

        >>> from sympy.combinatorics.testutil import _verify_bsgs
        >>> from sympy.combinatorics.named_groups import SymmetricGroup
        >>> S = SymmetricGroup(5)
        >>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
        >>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
        True

        Notes
        =====

        The algorithm is described in detail in [1], pp. 97-98. It extends
        the orbits ``orbs`` and the permutation groups ``stabs`` to
        basic orbits and basic stabilizers for the base and strong generating
        set produced in the end.
        The idea of the extension process
        is to "sift" random group elements through the stabilizer chain
        and amend the stabilizers/orbits along the way when a sift
        is not successful.
        The helper function ``_strip`` is used to attempt
        to decompose a random group element according to the current
        state of the stabilizer chain and report whether the element was
        fully decomposed (successful sift) or not (unsuccessful sift). In
        the latter case, the level at which the sift failed is reported and
        used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
        The halting condition is for ``consec_succ`` consecutive successful
        sifts to pass. This makes sure that the current ``base`` and ``gens``
        form a BSGS with probability at least `1 - 1/\text{consec\_succ}`.

        See Also
        ========

        schreier_sims
        """
        if base is None:
            base = []
        if gens is None:
            gens = self.generators
        base_len = len(base)
        n = self.degree
        # make sure no generator fixes all base points
        # (otherwise the first moved point of such a generator is
        # appended to the base)
        for gen in gens:
            if all(gen(x) == x for x in base):
                new = 0
                while gen._array_form[new] == new:
                    new += 1
                base.append(new)
                base_len += 1
        # distribute generators according to basic stabilizers
        strong_gens_distr = _distribute_gens_by_base(base, gens)
        # initialize the basic stabilizers, basic transversals and basic orbits
        transversals = {}
        orbs = {}
        for i in range(base_len):
            transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
                base[i], pairs=True))
            orbs[i] = list(transversals[i].keys())
        # initialize the number of consecutive elements sifted
        c = 0
        # start sifting random elements while the number of consecutive sifts
        # is less than consec_succ
        while c < consec_succ:
            if _random_prec is None:
                g = self.random_pr()
            else:
                g = _random_prec['g'].pop()
            # sift g through the current chain; h is the residue and j
            # the level at which sifting stopped
            h, j = _strip(g, base, orbs, transversals)
            y = True
            # determine whether a new base point is needed
            if j <= base_len:
                y = False
            elif not h.is_Identity:
                y = False
                moved = 0
                while h(moved) == moved:
                    moved += 1
                base.append(moved)
                base_len += 1
                strong_gens_distr.append([])
            # if the element doesn't sift, amend the strong generators and
            # associated stabilizers and orbits
            if y is False:
                for l in range(1, j):
                    strong_gens_distr[l].append(h)
                    transversals[l] = dict(_orbit_transversal(n,
                        strong_gens_distr[l], base[l], pairs=True))
                    orbs[l] = list(transversals[l].keys())
                c = 0
            else:
                c += 1
        # build the strong generating set
        strong_gens = strong_gens_distr[0][:]
        for gen in strong_gens_distr[1]:
            if gen not in strong_gens:
                strong_gens.append(gen)
        return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
Explanation
===========
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element does not belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
(5)(0 4)(1 3)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
r"""Return a strong generating set from the Schreier-Sims algorithm.
Explanation
===========
A generating set `S = \{g_1, g_2, \dots, g_t\}` for a permutation group
`G` is a strong generating set relative to the sequence of points
(referred to as a "base") `(b_1, b_2, \dots, b_k)` if, for
`1 \leq i \leq k` we have that the intersection of the pointwise
stabilizer `G^{(i+1)} := G_{b_1, b_2, \dots, b_i}` with `S` generates
the pointwise stabilizer `G^{(i+1)}`. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> D.strong_gens
[(0 1 2 3), (0 3)(1 2), (1 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup(self, gens):
"""
Return the subgroup generated by `gens` which is a list of
elements of the group
"""
if not all(g in self for g in gens):
raise ValueError("The group does not contain the supplied generators")
G = PermutationGroup(gens)
return G
    def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
                        init_subgroup=None):
        """Find the subgroup of all elements satisfying the property ``prop``.

        Explanation
        ===========

        This is done by a depth-first search with respect to base images that
        uses several tests to prune the search tree.

        Parameters
        ==========

        prop
            The property to be used. Has to be callable on group elements
            and always return ``True`` or ``False``. It is assumed that
            all group elements satisfying ``prop`` indeed form a subgroup.
        base
            A base for the supergroup.
        strong_gens
            A strong generating set for the supergroup.
        tests
            A list of callables of length equal to the length of ``base``.
            These are used to rule out group elements by partial base images,
            so that ``tests[l](g)`` returns False if the element ``g`` is known
            not to satisfy prop base on where g sends the first ``l + 1`` base
            points.
        init_subgroup
            if a subgroup of the sought group is
            known in advance, it can be passed to the function as this
            parameter.

        Returns
        =======

        res
            The subgroup of all elements satisfying ``prop``. The generating
            set for this group is guaranteed to be a strong generating set
            relative to the base ``base``.

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import (SymmetricGroup,
        ... AlternatingGroup)
        >>> from sympy.combinatorics.testutil import _verify_bsgs
        >>> S = SymmetricGroup(7)
        >>> prop_even = lambda x: x.is_even
        >>> base, strong_gens = S.schreier_sims_incremental()
        >>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
        >>> G.is_subgroup(AlternatingGroup(7))
        True
        >>> _verify_bsgs(G, base, G.generators)
        True

        Notes
        =====

        This function is extremely lengthy and complicated and will require
        some careful attention. The implementation is described in
        [1], pp. 114-117, and the comments for the code here follow the lines
        of the pseudocode in the book for clarity.
        The complexity is exponential in general, since the search process by
        itself visits all members of the supergroup. However, there are a lot
        of tests which are used to prune the search tree, and users can define
        their own tests via the ``tests`` parameter, so in practice, and for
        some computations, it's not terrible.
        A crucial part in the procedure is the frequent base change performed
        (this is line 11 in the pseudocode) in order to obtain a new basic
        stabilizer. The book mentiones that this can be done by using
        ``.baseswap(...)``, however the current implementation uses a more
        straightforward way to find the next basic stabilizer - calling the
        function ``.stabilizer(...)`` on the previous basic stabilizer.
        """
        # initialize BSGS and basic group properties
        def get_reps(orbits):
            # get the minimal element in the base ordering
            return [min(orbit, key = lambda x: base_ordering[x]) \
                    for orbit in orbits]

        def update_nu(l):
            # recompute the upper pruning bound nu[l] from the sizes of the
            # l-th basic orbits of the supergroup and of the found subgroup
            temp_index = len(basic_orbits[l]) + 1 -\
                        len(res_basic_orbits_init_base[l])
            # this corresponds to the element larger than all points
            if temp_index >= len(sorted_orbits[l]):
                nu[l] = base_ordering[degree]
            else:
                nu[l] = sorted_orbits[l][temp_index]

        if base is None:
            base, strong_gens = self.schreier_sims_incremental()
        base_len = len(base)
        degree = self.degree
        identity = _af_new(list(range(degree)))
        base_ordering = _base_ordering(base, degree)
        # add an element larger than all points
        base_ordering.append(degree)
        # add an element smaller than all points
        base_ordering.append(-1)
        # compute BSGS-related structures
        strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
        basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
                strong_gens_distr)
        # handle subgroup initialization and tests
        if init_subgroup is None:
            init_subgroup = PermutationGroup([identity])
        if tests is None:
            trivial_test = lambda x: True
            tests = []
            for i in range(base_len):
                tests.append(trivial_test)
        # line 1: more initializations.
        res = init_subgroup
        f = base_len - 1
        l = base_len - 1
        # line 2: set the base for K to the base for G
        res_base = base[:]
        # line 3: compute BSGS and related structures for K
        res_base, res_strong_gens = res.schreier_sims_incremental(
            base=res_base)
        res_strong_gens_distr = _distribute_gens_by_base(res_base,
                res_strong_gens)
        res_generators = res.generators
        res_basic_orbits_init_base = \
            [_orbit(degree, res_strong_gens_distr[i], res_base[i])\
             for i in range(base_len)]
        # initialize orbit representatives
        orbit_reps = [None]*base_len
        # line 4: orbit representatives for f-th basic stabilizer of K
        orbits = _orbits(degree, res_strong_gens_distr[f])
        orbit_reps[f] = get_reps(orbits)
        # line 5: remove the base point from the representatives to avoid
        # getting the identity element as a generator for K
        orbit_reps[f].remove(base[f])
        # line 6: more initializations
        c = [0]*base_len
        u = [identity]*base_len
        sorted_orbits = [None]*base_len
        for i in range(base_len):
            sorted_orbits[i] = basic_orbits[i][:]
            sorted_orbits[i].sort(key=lambda point: base_ordering[point])
        # line 7: initializations
        mu = [None]*base_len
        nu = [None]*base_len
        # this corresponds to the element smaller than all points
        mu[l] = degree + 1
        update_nu(l)
        # initialize computed words
        computed_words = [identity]*base_len
        # line 8: main loop
        while True:
            # apply all the tests
            while l < base_len - 1 and \
                    computed_words[l](base[l]) in orbit_reps[l] and \
                    base_ordering[mu[l]] < \
                    base_ordering[computed_words[l](base[l])] < \
                    base_ordering[nu[l]] and \
                    tests[l](computed_words):
                # line 11: change the (partial) base of K
                new_point = computed_words[l](base[l])
                res_base[l] = new_point
                new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
                        new_point)
                res_strong_gens_distr[l + 1] = new_stab_gens
                # line 12: calculate minimal orbit representatives for the
                # l+1-th basic stabilizer
                orbits = _orbits(degree, new_stab_gens)
                orbit_reps[l + 1] = get_reps(orbits)
                # line 13: amend sorted orbits
                l += 1
                temp_orbit = [computed_words[l - 1](point) for point
                             in basic_orbits[l]]
                temp_orbit.sort(key=lambda point: base_ordering[point])
                sorted_orbits[l] = temp_orbit
                # lines 14 and 15: update variables used minimality tests
                new_mu = degree + 1
                for i in range(l):
                    if base[l] in res_basic_orbits_init_base[i]:
                        candidate = computed_words[i](base[i])
                        if base_ordering[candidate] > base_ordering[new_mu]:
                            new_mu = candidate
                mu[l] = new_mu
                update_nu(l)
                # line 16: determine the new transversal element
                c[l] = 0
                temp_point = sorted_orbits[l][c[l]]
                gamma = computed_words[l - 1]._array_form.index(temp_point)
                u[l] = transversals[l][gamma]
                # update computed words
                computed_words[l] = rmul(computed_words[l - 1], u[l])
            # lines 17 & 18: apply the tests to the group element found
            g = computed_words[l]
            temp_point = g(base[l])
            if l == base_len - 1 and \
                    base_ordering[mu[l]] < \
                    base_ordering[temp_point] < base_ordering[nu[l]] and \
                    temp_point in orbit_reps[l] and \
                    tests[l](computed_words) and \
                    prop(g):
                # line 19: reset the base of K
                res_generators.append(g)
                res_base = base[:]
                # line 20: recalculate basic orbits (and transversals)
                res_strong_gens.append(g)
                res_strong_gens_distr = _distribute_gens_by_base(res_base,
                        res_strong_gens)
                res_basic_orbits_init_base = \
                    [_orbit(degree, res_strong_gens_distr[i], res_base[i]) \
                     for i in range(base_len)]
                # line 21: recalculate orbit representatives
                # line 22: reset the search depth
                orbit_reps[f] = get_reps(orbits)
                l = f
            # line 23: go up the tree until in the first branch not fully
            # searched
            while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
                l = l - 1
            # line 24: if the entire tree is traversed, return K
            if l == -1:
                return PermutationGroup(res_generators)
            # lines 25-27: update orbit representatives
            if l < f:
                # line 26
                f = l
                c[l] = 0
                # line 27
                temp_orbits = _orbits(degree, res_strong_gens_distr[f])
                orbit_reps[f] = get_reps(temp_orbits)
            # line 28: update variables used for minimality testing
            mu[l] = degree + 1
            temp_index = len(basic_orbits[l]) + 1 - \
                len(res_basic_orbits_init_base[l])
            if temp_index >= len(sorted_orbits[l]):
                nu[l] = base_ordering[degree]
            else:
                nu[l] = sorted_orbits[l][temp_index]
            # line 29: set the next element from the current branch and update
            # accordingly
            c[l] += 1
            if l == 0:
                gamma = sorted_orbits[l][c[l]]
            else:
                gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
            u[l] = transversals[l][gamma]
            if l == 0:
                computed_words[l] = u[l]
            else:
                computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
r"""Compute the degree of transitivity of the group.
Explanation
===========
A permutation group `G` acting on `\Omega = \{0, 1, \dots, n-1\}` is
``k``-fold transitive, if, for any `k` points
`(a_1, a_2, \dots, a_k) \in \Omega` and any `k` points
`(b_1, b_2, \dots, b_k) \in \Omega` there exists `g \in G` such that
`g(a_1) = b_1, g(a_2) = b_2, \dots, g(a_k) = b_k`
The degree of transitivity of `G` is the maximum ``k`` such that
`G` is ``k``-fold transitive. ([8])
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
    def _p_elements_group(self, p):
        '''
        For an abelian p-group, return the subgroup consisting of
        all elements of order p (and the identity)
        '''
        # work on a copy of the generators, sorted by decreasing order
        gens = self.generators[:]
        gens = sorted(gens, key=lambda x: x.order(), reverse=True)
        # gens_p[i] is gens[i] raised to the power |gens[i]|/p, i.e. an
        # element of order p (assumes p divides every generator's order
        # and that order()/p yields an exact integer power -- TODO confirm)
        gens_p = [g**(g.order()/p) for g in gens]
        gens_r = []
        for i in range(len(gens)):
            x = gens[i]
            x_order = x.order()
            # x_p has order p
            x_p = x**(x_order/p)
            if i > 0:
                P = PermutationGroup(gens_p[:i])
            else:
                P = PermutationGroup(self.identity)
            if x**(x_order/p) not in P:
                # x_p is independent of the previously collected order-p
                # elements; keep it as a generator of the result
                gens_r.append(x**(x_order/p))
            else:
                # replace x by an element of order (x.order()/p)
                # so that gens still generates G
                g = P.generator_product(x_p, original=True)
                for s in g:
                    x = x*s**-1
                x_order = x_order/p
                # insert x to gens so that the sorting is preserved
                del gens[i]
                del gens_p[i]
                j = i - 1
                while j < len(gens) and gens[j].order() >= x_order:
                    j += 1
                gens = gens[:j] + [x] + gens[j:]
                # NOTE(review): x (not an order-p power of x) is inserted
                # into gens_p here -- verify this is intended
                gens_p = gens_p[:j] + [x] + gens_p[j:]
        return PermutationGroup(gens_r)
    def _sylow_alt_sym(self, p):
        '''
        Return a p-Sylow subgroup of a symmetric or an
        alternating group.

        Explanation
        ===========

        The algorithm for this is hinted at in [1], Chapter 4,
        Exercise 4.

        For Sym(n) with n = p^i, the idea is as follows. Partition
        the interval [0..n-1] into p equal parts, each of length p^(i-1):
        [0..p^(i-1)-1], [p^(i-1)..2*p^(i-1)-1]...[(p-1)*p^(i-1)..p^i-1].
        Find a p-Sylow subgroup of Sym(p^(i-1)) (treated as a subgroup
        of ``self``) acting on each of the parts. Call the subgroups
        P_1, P_2...P_p. The generators for the subgroups P_2...P_p
        can be obtained from those of P_1 by applying a "shifting"
        permutation to them, that is, a permutation mapping [0..p^(i-1)-1]
        to the second part (the other parts are obtained by using the shift
        multiple times). The union of this permutation and the generators
        of P_1 is a p-Sylow subgroup of ``self``.

        For n not equal to a power of p, partition
        [0..n-1] in accordance with how n would be written in base p.
        E.g. for p=2 and n=11, 11 = 2^3 + 2^2 + 1 so the partition
        is [[0..7], [8..9], {10}]. To generate a p-Sylow subgroup,
        take the union of the generators for each of the parts.
        For the above example, {(0 1), (0 2)(1 3), (0 4), (1 5)(2 7)}
        from the first part, {(8 9)} from the second part and
        nothing from the third. This gives 4 generators in total, and
        the subgroup they generate is p-Sylow.

        Alternating groups are treated the same except when p=2. In this
        case, (0 1)(s s+1) should be added for an appropriate s (the start
        of a part) for each part in the partitions.

        See Also
        ========

        sylow_subgroup, is_alt_sym
        '''
        n = self.degree
        gens = []
        identity = Permutation(n-1)
        # the case of 2-sylow subgroups of alternating groups
        # needs special treatment
        alt = p == 2 and all(g.is_even for g in self.generators)

        # find the presentation of n in base p
        coeffs = []
        m = n
        while m > 0:
            coeffs.append(m % p)
            m = m // p

        power = len(coeffs)-1
        # for a symmetric group, gens[:i] is the generating
        # set for a p-Sylow subgroup on [0..p**(i-1)-1]. For
        # alternating groups, the same is given by gens[:2*(i-1)]
        for i in range(1, power+1):
            if i == 1 and alt:
                # (0 1) shouldn't be added for alternating groups
                continue
            # cyclic shift by p^(i-1) within [0..p^i-1]
            gen = Permutation([(j + p**(i-1)) % p**i for j in range(p**i)])
            # multiplying by `identity` extends the permutation to degree n
            gens.append(identity*gen)
            if alt:
                # the even companion generator for the alternating case
                gen = Permutation(0, 1)*gen*Permutation(0, 1)*gen
                gens.append(gen)

        # the first point in the current part (see the algorithm
        # description in the docstring)
        start = 0

        while power > 0:
            a = coeffs[power]

            # make the permutation shifting the start of the first
            # part ([0..p^i-1] for some i) to the current one
            for _ in range(a):
                shift = Permutation()
                if start > 0:
                    for i in range(p**power):
                        shift = shift(i, start + i)

                    if alt:
                        gen = Permutation(0, 1)*shift*Permutation(0, 1)*shift
                        gens.append(gen)
                        j = 2*(power - 1)
                    else:
                        j = power

                    for i, gen in enumerate(gens[:j]):
                        if alt and i % 2 == 1:
                            continue
                        # shift the generator to the start of the
                        # partition part
                        gen = shift*gen*shift
                        gens.append(gen)

                start += p**power
            power = power-1

        return gens
def sylow_subgroup(self, p):
'''
Return a p-Sylow subgroup of the group.
The algorithm is described in [1], Chapter 4, Section 7
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> D = DihedralGroup(6)
>>> S = D.sylow_subgroup(2)
>>> S.order()
4
>>> G = SymmetricGroup(6)
>>> S = G.sylow_subgroup(5)
>>> S.order()
5
>>> G1 = AlternatingGroup(3)
>>> G2 = AlternatingGroup(5)
>>> G3 = AlternatingGroup(9)
>>> S1 = G1.sylow_subgroup(3)
>>> S2 = G2.sylow_subgroup(3)
>>> S3 = G3.sylow_subgroup(3)
>>> len1 = len(S1.lower_central_series())
>>> len2 = len(S2.lower_central_series())
>>> len3 = len(S3.lower_central_series())
>>> len1 == len2
True
>>> len1 < len3
True
'''
from sympy.combinatorics.homomorphisms import (
orbit_homomorphism, block_homomorphism)
if not isprime(p):
raise ValueError("p must be a prime")
def is_p_group(G):
# check if the order of G is a power of p
# and return the power
m = G.order()
n = 0
while m % p == 0:
m = m/p
n += 1
if m == 1:
return True, n
return False, n
def _sylow_reduce(mu, nu):
# reduction based on two homomorphisms
# mu and nu with trivially intersecting
# kernels
Q = mu.image().sylow_subgroup(p)
Q = mu.invert_subgroup(Q)
nu = nu.restrict_to(Q)
R = nu.image().sylow_subgroup(p)
return nu.invert_subgroup(R)
order = self.order()
if order % p != 0:
return PermutationGroup([self.identity])
p_group, n = is_p_group(self)
if p_group:
return self
if self.is_alt_sym():
return PermutationGroup(self._sylow_alt_sym(p))
# if there is a non-trivial orbit with size not divisible
# by p, the sylow subgroup is contained in its stabilizer
# (by orbit-stabilizer theorem)
orbits = self.orbits()
non_p_orbits = [o for o in orbits if len(o) % p != 0 and len(o) != 1]
if non_p_orbits:
G = self.stabilizer(list(non_p_orbits[0]).pop())
return G.sylow_subgroup(p)
if not self.is_transitive():
# apply _sylow_reduce to orbit actions
orbits = sorted(orbits, key=len)
omega1 = orbits.pop()
omega2 = orbits[0].union(*orbits)
mu = orbit_homomorphism(self, omega1)
nu = orbit_homomorphism(self, omega2)
return _sylow_reduce(mu, nu)
blocks = self.minimal_blocks()
if len(blocks) > 1:
# apply _sylow_reduce to block system actions
mu = block_homomorphism(self, blocks[0])
nu = block_homomorphism(self, blocks[1])
return _sylow_reduce(mu, nu)
elif len(blocks) == 1:
block = list(blocks)[0]
if any(e != 0 for e in block):
# self is imprimitive
mu = block_homomorphism(self, block)
if not is_p_group(mu.image())[0]:
S = mu.image().sylow_subgroup(p)
return mu.invert_subgroup(S).sylow_subgroup(p)
# find an element of order p
g = self.random()
g_order = g.order()
while g_order % p != 0 or g_order == 0:
g = self.random()
g_order = g.order()
g = g**(g_order // p)
if order % p**2 != 0:
return PermutationGroup(g)
C = self.centralizer(g)
while C.order() % p**n != 0:
S = C.sylow_subgroup(p)
s_order = S.order()
Z = S.center()
P = Z._p_elements_group(p)
h = P.random()
C_h = self.centralizer(h)
while C_h.order() % p*s_order != 0:
h = P.random()
C_h = self.centralizer(h)
C = C_h
return C.sylow_subgroup(p)
    def _block_verify(self, L, alpha):
        '''
        Check whether the orbits of ``alpha`` under the subgroup ``L``
        induce a block system for the action of ``self``.

        On success, return ``(True, blocks)`` where ``blocks[i]`` is the
        representative of the block containing the i-th point (in sorted
        orbit order) of ``self.orbit(alpha)``. On failure, return
        ``(False, s)`` where ``s`` is a group element witnessing that the
        candidate blocks are not preserved.
        '''
        delta = sorted(self.orbit(alpha))
        # p[i] will be the number of the block
        # delta[i] belongs to
        p = [-1]*len(delta)
        blocks = [-1]*len(delta)

        B = [[]] # future list of blocks
        u = [0]*len(delta) # u[i] in L s.t. alpha^u[i] = B[0][i]

        # the initial block is the orbit of alpha under L; u records the
        # transversal elements reaching each of its points
        t = L.orbit_transversal(alpha, pairs=True)
        for a, beta in t:
            B[0].append(a)
            i_a = delta.index(a)
            p[i_a] = 0
            blocks[i_a] = alpha
            u[i_a] = beta

        rho = 0
        m = 0 # number of blocks - 1

        # process each block in turn, translating it by every generator
        # and either growing the block system or detecting a failure
        while rho <= m:
            beta = B[rho][0]
            for g in self.generators:
                d = beta^g
                i_d = delta.index(d)
                sigma = p[i_d]
                if sigma < 0:
                    # define a new block
                    m += 1
                    sigma = m
                    u[i_d] = u[delta.index(beta)]*g
                    p[i_d] = sigma
                    rep = d
                    blocks[i_d] = rep
                    newb = [rep]
                    for gamma in B[rho][1:]:
                        i_gamma = delta.index(gamma)
                        d = gamma^g
                        i_d = delta.index(d)
                        if p[i_d] < 0:
                            u[i_d] = u[i_gamma]*g
                            p[i_d] = sigma
                            blocks[i_d] = rep
                            newb.append(d)
                        else:
                            # B[rho] is not a block
                            s = u[i_gamma]*g*u[i_d]**(-1)
                            return False, s
                    B.append(newb)
                else:
                    # the image lands in an existing block; verify the
                    # whole of B[rho] maps into it
                    for h in B[rho][1:]:
                        if h^g not in B[sigma]:
                            # B[rho] is not a block
                            s = u[delta.index(beta)]*g*u[i_d]**(-1)
                            return False, s
            rho += 1
        return True, blocks
    def _verify(H, K, phi, z, alpha):
        '''
        Return a list of relators ``rels`` in generators ``gens_h`` that
        are mapped to ``H.generators`` by ``phi`` so that given a finite
        presentation <gens_k | rels_k> of ``K`` on a subset of ``gens_h``
        <gens_h | rels_k + rels> is a finite presentation of ``H``.

        Explanation
        ===========

        ``H`` should be generated by the union of ``K.generators`` and ``z``
        (a single generator), and ``H.stabilizer(alpha) == K``; ``phi`` is a
        canonical injection from a free group into a permutation group
        containing ``H``.

        The algorithm is described in [1], Chapter 6.

        Examples
        ========

        >>> from sympy.combinatorics import free_group, Permutation, PermutationGroup
        >>> from sympy.combinatorics.homomorphisms import homomorphism
        >>> from sympy.combinatorics.fp_groups import FpGroup

        >>> H = PermutationGroup(Permutation(0, 2), Permutation (1, 5))
        >>> K = PermutationGroup(Permutation(5)(0, 2))
        >>> F = free_group("x_0 x_1")[0]
        >>> gens = F.generators
        >>> phi = homomorphism(F, H, F.generators, H.generators)
        >>> rels_k = [gens[0]**2] # relators for presentation of K
        >>> z = Permutation(1, 5)
        >>> check, rels_h = H._verify(K, phi, z, 1)
        >>> check
        True
        >>> rels = rels_k + rels_h
        >>> G = FpGroup(F, rels) # presentation of H
        >>> G.order() == H.order()
        True

        See also
        ========

        strong_presentation, presentation, stabilizer

        '''
        # NOTE: the first parameter is named H (instead of self) to match
        # the notation of the algorithm in [1], Chapter 6.
        orbit = H.orbit(alpha)
        beta = alpha^(z**-1)

        K_beta = K.stabilizer(beta)

        # orbit representatives of K_beta
        gammas = [alpha, beta]
        orbits = list({tuple(K_beta.orbit(o)) for o in orbit})
        orbit_reps = [orb[0] for orb in orbits]
        for rep in orbit_reps:
            if rep not in gammas:
                gammas.append(rep)

        # orbit transversal of K
        betas = [alpha, beta]
        transversal = {alpha: phi.invert(H.identity), beta: phi.invert(z**-1)}

        for s, g in K.orbit_transversal(beta, pairs=True):
            if s not in transversal:
                transversal[s] = transversal[beta]*phi.invert(g)

        # extend the transversal until it covers the whole orbit of alpha
        # under H, translating by z and closing under K
        union = K.orbit(alpha).union(K.orbit(beta))
        while (len(union) < len(orbit)):
            for gamma in gammas:
                if gamma in union:
                    r = gamma^z
                    if r not in union:
                        betas.append(r)
                        transversal[r] = transversal[gamma]*phi.invert(z)
                        for s, g in K.orbit_transversal(r, pairs=True):
                            if s not in transversal:
                                transversal[s] = transversal[r]*phi.invert(g)
                        union = union.union(K.orbit(r))
                        break

        # compute relators
        rels = []

        for b in betas:
            k_gens = K.stabilizer(b).generators
            for y in k_gens:
                new_rel = transversal[b]
                gens = K.generator_product(y, original=True)
                for g in gens[::-1]:
                    new_rel = new_rel*phi.invert(g)
                new_rel = new_rel*transversal[b]**-1

                perm = phi(new_rel)
                try:
                    gens = K.generator_product(perm, original=True)
                except ValueError:
                    # perm cannot be written in K's generators: verification
                    # fails and perm is returned as a witness
                    return False, perm

                for g in gens:
                    new_rel = new_rel*phi.invert(g)**-1
                if new_rel not in rels:
                    rels.append(new_rel)

        for gamma in gammas:
            new_rel = transversal[gamma]*phi.invert(z)*transversal[gamma^z]**-1

            perm = phi(new_rel)
            try:
                gens = K.generator_product(perm, original=True)
            except ValueError:
                return False, perm

            for g in gens:
                new_rel = new_rel*phi.invert(g)**-1
            if new_rel not in rels:
                rels.append(new_rel)

        return True, rels
    def strong_presentation(self):
        '''
        Return a strong finite presentation of group. The generators
        of the returned group are in the same order as the strong
        generators of group.

        The algorithm is based on Sims' Verify algorithm described
        in [1], Chapter 6.

        Examples
        ========

        >>> from sympy.combinatorics.named_groups import DihedralGroup
        >>> P = DihedralGroup(4)
        >>> G = P.strong_presentation()
        >>> P.order() == G.order()
        True

        See Also
        ========

        presentation, _verify

        '''
        from sympy.combinatorics.fp_groups import (FpGroup,
                                                   simplify_presentation)
        from sympy.combinatorics.free_groups import free_group
        from sympy.combinatorics.homomorphisms import (block_homomorphism,
                                                       homomorphism, GroupHomomorphism)

        strong_gens = self.strong_gens[:]
        stabs = self.basic_stabilizers[:]
        base = self.base[:]

        # injection from a free group on len(strong_gens)
        # generators into G
        gen_syms = [('x_%d'%i) for i in range(len(strong_gens))]
        F = free_group(', '.join(gen_syms))[0]
        phi = homomorphism(F, self, F.generators, strong_gens)

        # walk up the stabilizer chain: K is the previous (smaller) group,
        # H the current one obtained by adjoining new strong generators
        H = PermutationGroup(self.identity)
        while stabs:
            alpha = base.pop()
            K = H
            H = stabs.pop()
            new_gens = [g for g in H.generators if g not in K]

            if K.order() == 1:
                z = new_gens.pop()
                rels = [F.generators[-1]**z.order()]
                intermediate_gens = [z]
                K = PermutationGroup(intermediate_gens)

            # add generators one at a time building up from K to H
            while new_gens:
                z = new_gens.pop()
                intermediate_gens = [z] + intermediate_gens
                K_s = PermutationGroup(intermediate_gens)
                orbit = K_s.orbit(alpha)
                orbit_k = K.orbit(alpha)

                # split into cases based on the orbit of K_s
                if orbit_k == orbit:
                    # the orbit did not grow: a single new relator is
                    # obtained by rewriting z (or z*t**-1) in K's generators
                    if z in K:
                        rel = phi.invert(z)
                        perm = z
                    else:
                        t = K.orbit_rep(alpha, alpha^z)
                        rel = phi.invert(z)*phi.invert(t)**-1
                        perm = z*t**-1

                    for g in K.generator_product(perm, original=True):
                        rel = rel*phi.invert(g)**-1
                    new_rels = [rel]
                elif len(orbit_k) == 1:
                    # `success` is always true because `strong_gens`
                    # and `base` are already a verified BSGS. Later
                    # this could be changed to start with a randomly
                    # generated (potential) BSGS, and then new elements
                    # would have to be appended to it when `success`
                    # is false.
                    success, new_rels = K_s._verify(K, phi, z, alpha)
                else:
                    # K.orbit(alpha) should be a block
                    # under the action of K_s on K_s.orbit(alpha)
                    check, block = K_s._block_verify(K, alpha)
                    if check:
                        # apply _verify to the action of K_s
                        # on the block system; for convenience,
                        # add the blocks as additional points
                        # that K_s should act on
                        t = block_homomorphism(K_s, block)
                        m = t.codomain.degree # number of blocks
                        d = K_s.degree

                        # conjugating with p will shift
                        # permutations in t.image() to
                        # higher numbers, e.g.
                        # p*(0 1)*p = (m m+1)
                        p = Permutation()
                        for i in range(m):
                            p *= Permutation(i, i+d)

                        t_img = t.images
                        # combine generators of K_s with their
                        # action on the block system
                        images = {g: g*p*t_img[g]*p for g in t_img}
                        for g in self.strong_gens[:-len(K_s.generators)]:
                            images[g] = g
                        K_s_act = PermutationGroup(list(images.values()))
                        f = GroupHomomorphism(self, K_s_act, images)

                        K_act = PermutationGroup([f(g) for g in K.generators])
                        success, new_rels = K_s_act._verify(K_act, f.compose(phi), f(z), d)

                for n in new_rels:
                    if n not in rels:
                        rels.append(n)
                K = K_s

        group = FpGroup(F, rels)
        return simplify_presentation(group)
def presentation(self, eliminate_gens=True):
'''
Return an `FpGroup` presentation of the group.
The algorithm is described in [1], Chapter 6.1.
'''
from sympy.combinatorics.fp_groups import (FpGroup,
simplify_presentation)
from sympy.combinatorics.coset_table import CosetTable
from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.homomorphisms import homomorphism
if self._fp_presentation:
return self._fp_presentation
def _factor_group_by_rels(G, rels):
if isinstance(G, FpGroup):
rels.extend(G.relators)
return FpGroup(G.free_group, list(set(rels)))
return FpGroup(G, rels)
gens = self.generators
len_g = len(gens)
if len_g == 1:
order = gens[0].order()
# handle the trivial group
if order == 1:
return free_group([])[0]
F, x = free_group('x')
return FpGroup(F, [x**order])
if self.order() > 20:
half_gens = self.generators[0:(len_g+1)//2]
else:
half_gens = []
H = PermutationGroup(half_gens)
H_p = H.presentation()
len_h = len(H_p.generators)
C = self.coset_table(H)
n = len(C) # subgroup index
gen_syms = [('x_%d'%i) for i in range(len(gens))]
F = free_group(', '.join(gen_syms))[0]
# mapping generators of H_p to those of F
images = [F.generators[i] for i in range(len_h)]
R = homomorphism(H_p, F, H_p.generators, images, check=False)
# rewrite relators
rels = R(H_p.relators)
G_p = FpGroup(F, rels)
# injective homomorphism from G_p into self
T = homomorphism(G_p, self, G_p.generators, gens)
C_p = CosetTable(G_p, [])
C_p.table = [[None]*(2*len_g) for i in range(n)]
# initiate the coset transversal
transversal = [None]*n
transversal[0] = G_p.identity
# fill in the coset table as much as possible
for i in range(2*len_h):
C_p.table[0][i] = 0
gamma = 1
for alpha, x in product(range(n), range(2*len_g)):
beta = C[alpha][x]
if beta == gamma:
gen = G_p.generators[x//2]**((-1)**(x % 2))
transversal[beta] = transversal[alpha]*gen
C_p.table[alpha][x] = beta
C_p.table[beta][x + (-1)**(x % 2)] = alpha
gamma += 1
if gamma == n:
break
C_p.p = list(range(n))
beta = x = 0
while not C_p.is_complete():
# find the first undefined entry
while C_p.table[beta][x] == C[beta][x]:
x = (x + 1) % (2*len_g)
if x == 0:
beta = (beta + 1) % n
# define a new relator
gen = G_p.generators[x//2]**((-1)**(x % 2))
new_rel = transversal[beta]*gen*transversal[C[beta][x]]**-1
perm = T(new_rel)
nxt = G_p.identity
for s in H.generator_product(perm, original=True):
nxt = nxt*T.invert(s)**-1
new_rel = new_rel*nxt
# continue coset enumeration
G_p = _factor_group_by_rels(G_p, [new_rel])
C_p.scan_and_fill(0, new_rel)
C_p = G_p.coset_enumeration([], strategy="coset_table",
draft=C_p, max_cosets=n, incomplete=True)
self._fp_presentation = simplify_presentation(G_p)
return self._fp_presentation
def polycyclic_group(self):
"""
Return the PolycyclicGroup instance with below parameters:
Explanation
===========
* pc_sequence : Polycyclic sequence is formed by collecting all
the missing generators between the adjacent groups in the
derived series of given permutation group.
* pc_series : Polycyclic series is formed by adding all the missing
generators of ``der[i+1]`` in ``der[i]``, where ``der`` represents
the derived series.
* relative_order : A list, computed by the ratio of adjacent groups in
pc_series.
"""
from sympy.combinatorics.pc_groups import PolycyclicGroup
if not self.is_polycyclic:
raise ValueError("The group must be solvable")
der = self.derived_series()
pc_series = []
pc_sequence = []
relative_order = []
pc_series.append(der[-1])
der.reverse()
for i in range(len(der)-1):
H = der[i]
for g in der[i+1].generators:
if g not in H:
H = PermutationGroup([g] + H.generators)
pc_series.insert(0, H)
pc_sequence.insert(0, g)
G1 = pc_series[0].order()
G2 = pc_series[1].order()
relative_order.insert(0, G1 // G2)
return PolycyclicGroup(pc_sequence, pc_series, relative_order, collector=None)
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha `\{g(\alpha) | g \in G\}` as a set.
Explanation
===========
The time complexity of the algorithm used here is `O(|Orb|*r)` where
`|Orb|` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
if alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a sets
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.perm_groups import _orbit
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
{0, 1, 2}
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
{0, 1, 2, 3, 4, 5, 6}
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] == False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = tuple([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = {alpha}
for b in orb:
for gen in gens:
temp = frozenset([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return {tuple(x) for x in orb}
def _orbits(degree, generators):
"""Compute the orbits of G.
If ``rep=False`` it returns a list of sets else it returns a list of
representatives of the orbits
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import _orbits
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[{0, 1, 2}]
"""
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False, slp=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
Explanation
===========
generators generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
`Orb = \{g(\alpha) | g \in G\}` is a set
`\{g_\beta | g_\beta(\alpha) = \beta\}` for `\beta \in Orb`.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
`(\beta, g_\beta)`. For a proof of correctness, see [1], p.79
if ``af`` is ``True``, the transversal elements are given in
array form.
If `slp` is `True`, a dictionary `{beta: slp_beta}` is returned
for `\beta \in Orb` where `slp_beta` is a list of indices of the
generators in `generators` s.t. if `slp_beta = [i_1 \dots i_n]`
`g_\beta = generators[i_n] \times \dots \times generators[i_1]`.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.perm_groups import _orbit_transversal
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
"""
tr = [(alpha, list(range(degree)))]
slp_dict = {alpha: []}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
px_slp = slp_dict[x]
for gen in gens:
temp = gen[x]
if used[temp] == False:
slp_dict[temp] = [gens.index(gen)] + px_slp
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
if not slp:
return tr
return tr, slp_dict
if af:
tr = [y for _, y in tr]
if not slp:
return tr
return tr, slp_dict
tr = [_af_new(y) for _, y in tr]
if not slp:
return tr
return tr, slp_dict
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
Explanation
===========
The stabilizer of `\alpha` is the group `G_\alpha =
\{g \in G | g(\alpha) = \alpha\}`.
For a proof of correctness, see [1], p.79.
degree : degree of G
generators : generators of G
Examples
========
>>> from sympy.combinatorics.perm_groups import _stabilizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[(5)(0 4)(1 3), (5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
PermGroup = PermutationGroup
| PermutationGroup |
python | ray-project__ray | python/ray/tune/logger/logger.py | {
"start": 7290,
"end": 8122
} | class ____(yaml.SafeDumper):
def represent_sequence(self, tag, sequence, flow_style=None):
if len(sequence) > _SEQUENCE_LEN_FLOW_STYLE:
return super().represent_sequence(tag, sequence, flow_style=True)
return super().represent_sequence(tag, sequence, flow_style=flow_style)
@DeveloperAPI
def pretty_print(result, exclude: Optional[Set[str]] = None):
result = result.copy()
result.update(config=None) # drop config from pretty print
result.update(hist_stats=None) # drop hist_stats from pretty print
out = {}
for k, v in result.items():
if v is not None and (exclude is None or k not in exclude):
out[k] = v
cleaned = json.dumps(out, cls=SafeFallbackEncoder)
return yaml.dump(json.loads(cleaned), Dumper=_RayDumper, default_flow_style=False)
| _RayDumper |
python | PrefectHQ__prefect | src/prefect/server/schemas/schedules.py | {
"start": 18254,
"end": 27126
} | class ____(PrefectBaseModel):
"""
RRule schedule, based on the iCalendar standard
([RFC 5545](https://datatracker.ietf.org/doc/html/rfc5545)) as
implemented in `dateutils.rrule`.
RRules are appropriate for any kind of calendar-date manipulation, including
irregular intervals, repetition, exclusions, week day or day-of-month
adjustments, and more.
Note that as a calendar-oriented standard, `RRuleSchedules` are sensitive to
to the initial timezone provided. A 9am daily schedule with a daylight saving
time-aware start date will maintain a local 9am time through DST boundaries;
a 9am daily schedule with a UTC start date will maintain a 9am UTC time.
Args:
rrule (str): a valid RRule string
timezone (str, optional): a valid timezone string
"""
model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
rrule: str
timezone: Optional[TimeZone] = "UTC"
@field_validator("rrule")
@classmethod
def validate_rrule_str(cls, v: str) -> str:
return validate_rrule_string(v)
@classmethod
def from_rrule(
cls, rrule: dateutil.rrule.rrule | dateutil.rrule.rruleset
) -> "RRuleSchedule":
if isinstance(rrule, dateutil.rrule.rrule):
if rrule._dtstart.tzinfo is not None:
timezone = getattr(rrule._dtstart.tzinfo, "name", None) or getattr(
rrule._dtstart.tzinfo, "key", "UTC"
)
else:
timezone = "UTC"
return RRuleSchedule(rrule=str(rrule), timezone=timezone)
elif isinstance(rrule, dateutil.rrule.rruleset):
dtstarts = [rr._dtstart for rr in rrule._rrule if rr._dtstart is not None]
unique_dstarts = set(
create_datetime_instance(d).astimezone(ZoneInfo("UTC"))
for d in dtstarts
)
unique_timezones = set(d.tzinfo for d in dtstarts if d.tzinfo is not None)
if len(unique_timezones) > 1:
raise ValueError(
f"rruleset has too many dtstart timezones: {unique_timezones}"
)
if len(unique_dstarts) > 1:
raise ValueError(f"rruleset has too many dtstarts: {unique_dstarts}")
if unique_dstarts and unique_timezones:
tzinfo = dtstarts[0].tzinfo
timezone = getattr(tzinfo, "name", None) or getattr(
tzinfo, "key", "UTC"
)
else:
timezone = "UTC"
rruleset_string = ""
if rrule._rrule:
rruleset_string += "\n".join(str(r) for r in rrule._rrule)
if rrule._exrule:
rruleset_string += "\n" if rruleset_string else ""
rruleset_string += "\n".join(str(r) for r in rrule._exrule).replace(
"RRULE", "EXRULE"
)
if rrule._rdate:
rruleset_string += "\n" if rruleset_string else ""
rruleset_string += "RDATE:" + ",".join(
rd.strftime("%Y%m%dT%H%M%SZ") for rd in rrule._rdate
)
if rrule._exdate:
rruleset_string += "\n" if rruleset_string else ""
rruleset_string += "EXDATE:" + ",".join(
exd.strftime("%Y%m%dT%H%M%SZ") for exd in rrule._exdate
)
return RRuleSchedule(rrule=rruleset_string, timezone=timezone)
else:
raise ValueError(f"Invalid RRule object: {rrule}")
def to_rrule(self) -> dateutil.rrule.rrule:
"""
Since rrule doesn't properly serialize/deserialize timezones, we localize dates
here
"""
rrule = dateutil.rrule.rrulestr(
self.rrule,
dtstart=DEFAULT_ANCHOR_DATE,
cache=True,
)
timezone = dateutil.tz.gettz(self.timezone)
if isinstance(rrule, dateutil.rrule.rrule):
kwargs = dict(dtstart=rrule._dtstart.replace(tzinfo=timezone))
if rrule._until:
kwargs.update(
until=rrule._until.replace(tzinfo=timezone),
)
return rrule.replace(**kwargs)
elif isinstance(rrule, dateutil.rrule.rruleset):
# update rrules
localized_rrules = []
for rr in rrule._rrule:
kwargs = dict(dtstart=rr._dtstart.replace(tzinfo=timezone))
if rr._until:
kwargs.update(
until=rr._until.replace(tzinfo=timezone),
)
localized_rrules.append(rr.replace(**kwargs))
rrule._rrule = localized_rrules
# update exrules
localized_exrules = []
for exr in rrule._exrule:
kwargs = dict(dtstart=exr._dtstart.replace(tzinfo=timezone))
if exr._until:
kwargs.update(
until=exr._until.replace(tzinfo=timezone),
)
localized_exrules.append(exr.replace(**kwargs))
rrule._exrule = localized_exrules
# update rdates
localized_rdates = []
for rd in rrule._rdate:
localized_rdates.append(rd.replace(tzinfo=timezone))
rrule._rdate = localized_rdates
# update exdates
localized_exdates = []
for exd in rrule._exdate:
localized_exdates.append(exd.replace(tzinfo=timezone))
rrule._exdate = localized_exdates
return rrule
async def get_dates(
self,
n: Optional[int] = None,
start: datetime.datetime = None,
end: datetime.datetime = None,
) -> List[DateTime]:
"""Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
following the start date.
Args:
n (int): The number of dates to generate
start (datetime.datetime, optional): The first returned date will be on or
after this date. Defaults to None. If a timezone-naive datetime is
provided, it is assumed to be in the schedule's timezone.
end (datetime.datetime, optional): The maximum scheduled date to return. If
a timezone-naive datetime is provided, it is assumed to be in the
schedule's timezone.
Returns:
List[DateTime]: A list of dates
"""
return sorted(self._get_dates_generator(n=n, start=start, end=end))
def _get_dates_generator(
self,
n: Optional[int] = None,
start: Optional[datetime.datetime] = None,
end: Optional[datetime.datetime] = None,
) -> Generator[DateTime, None, None]:
"""Retrieves dates from the schedule. Up to 1,000 candidate dates are checked
following the start date.
Args:
n (int): The number of dates to generate
start (datetime.datetime, optional): The first returned date will be on or
after this date. Defaults to the current date. If a timezone-naive
datetime is provided, it is assumed to be in the schedule's timezone.
end (datetime.datetime, optional): No returned date will exceed this date.
If a timezone-naive datetime is provided, it is assumed to be in the
schedule's timezone.
Returns:
List[DateTime]: a list of dates
"""
if start is None:
start = now("UTC")
start, end = _prepare_scheduling_start_and_end(start, end, self.timezone)
if n is None:
# if an end was supplied, we do our best to supply all matching dates (up
# to MAX_ITERATIONS)
if end is not None:
n = MAX_ITERATIONS
else:
n = 1
dates = set()
counter = 0
# pass count = None to account for discrepancies with duplicates around DST
# boundaries
for next_date in self.to_rrule().xafter(start, count=None, inc=True):
next_date = create_datetime_instance(next_date).astimezone(
ZoneInfo(self.timezone)
)
# if the end date was exceeded, exit
if end and next_date > end:
break
# ensure no duplicates; weird things can happen with DST
if next_date not in dates:
dates.add(next_date)
yield next_date
# if enough dates have been collected or enough attempts were made, exit
if len(dates) >= n or counter > MAX_ITERATIONS:
break
counter += 1
SCHEDULE_TYPES = Union[IntervalSchedule, CronSchedule, RRuleSchedule]
| RRuleSchedule |
python | graphql-python__graphene | graphene/types/datetime.py | {
"start": 1327,
"end": 2450
} | class ____(Scalar):
"""
The `DateTime` scalar type represents a DateTime
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(dt):
if not isinstance(dt, (datetime.datetime, datetime.date)):
raise GraphQLError(f"DateTime cannot represent value: {repr(dt)}")
return dt.isoformat()
@classmethod
def parse_literal(cls, node, _variables=None):
if not isinstance(node, StringValueNode):
raise GraphQLError(
f"DateTime cannot represent non-string value: {print_ast(node)}"
)
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
if isinstance(value, datetime.datetime):
return value
if not isinstance(value, str):
raise GraphQLError(
f"DateTime cannot represent non-string value: {repr(value)}"
)
try:
return isoparse(value)
except ValueError:
raise GraphQLError(f"DateTime cannot represent value: {repr(value)}")
| DateTime |
python | pdm-project__pdm | src/pdm/models/specifiers.py | {
"start": 2196,
"end": 9836
} | class ____(SpecifierSet):
"""A custom SpecifierSet that supports merging with logic operators (&, |)."""
PY_MAX_MINOR_VERSION = _read_max_versions()
MAX_MAJOR_VERSION = max(PY_MAX_MINOR_VERSION)[:1].bump()
__slots__ = ("_logic", "_prereleases", "_specs")
def __init__(self, spec: str | VersionSpecifier = "") -> None:
if spec == "<empty>":
spec = EmptySpecifier()
if isinstance(spec, BaseSpecifier):
super().__init__(self._normalize(spec))
self._logic = spec
return
try:
if spec == "*": # pragma: no cover
spec = ""
super().__init__(fix_legacy_specifier(spec))
self._logic = from_specifierset(self)
except ValueError:
raise InvalidPyVersion(f"Invalid specifier: {spec}") from None
def __hash__(self) -> int:
return hash(self._logic)
def __str__(self) -> str:
if self.is_empty():
return "<empty>"
return super().__str__()
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PySpecSet):
return NotImplemented
return self._logic == other._logic
def is_empty(self) -> bool:
"""Check whether the specifierset contains any valid versions."""
return self._logic.is_empty()
def is_any(self) -> bool:
"""Return True if the specifierset accepts all versions."""
return self._logic.is_any()
@classmethod
def _normalize(cls, spec: VersionSpecifier) -> str:
if spec.is_empty():
return ""
if not isinstance(spec, UnionSpecifier):
return str(spec)
ranges, next_ranges = itertools.tee(sorted(spec.ranges))
next(next_ranges, None)
whole_range = RangeSpecifier(
min=spec.ranges[0].min,
max=spec.ranges[-1].max,
include_min=spec.ranges[0].include_min,
include_max=spec.ranges[-1].include_max,
)
parts = [] if whole_range.is_any() else [str(whole_range)]
for left, right in zip(ranges, next_ranges):
assert left.max is not None and right.min is not None
start = Version(left.max.release).complete()
end = Version(right.min.release).complete()
if left.include_max:
start = start.bump()
if not right.include_min:
end = end.bump()
parts.extend(f"!={v}" for v in cls._populate_version_range(start, end))
return ",".join(parts)
def __repr__(self) -> str:
return f"<PySpecSet {self}>"
def __and__(self, other: Any) -> PySpecSet:
if isinstance(other, PySpecSet):
return type(self)(self._logic & other._logic)
elif isinstance(other, VersionSpecifier):
return type(self)(self._logic & other)
return NotImplemented
def __or__(self, other: Any) -> PySpecSet:
if isinstance(other, PySpecSet):
return type(self)(self._logic | other._logic)
elif isinstance(other, VersionSpecifier):
return type(self)(self._logic | other)
return NotImplemented
@classmethod
def _populate_version_range(cls, lower: Version, upper: Version) -> Iterable[Version]:
"""Expand the version range to a collection of versions to exclude,
taking the released python versions into consideration.
"""
assert lower < upper
prev = lower
while prev < upper:
if prev[-2:] == Version((0, 0)): # X.0.0
cur = prev.bump(0) # X+1.0.0
if cur <= upper: # It is still within the range
yield Version((prev[0], "*")) # Exclude the whole major series: X.*
prev = cur
continue
if prev[-1] == 0: # X.Y.0
cur = prev.bump(1) # X.Y+1.0
if cur <= upper: # It is still within the range
yield prev[:2].complete("*") # Exclude X.Y.*
prev = (
prev.bump(0) if cur.is_py2 and cast(int, cur[1]) > cls.PY_MAX_MINOR_VERSION[cur[:1]] else cur
) # If prev is 2.7, next is 3.0, otherwise next is X.Y+1.0
continue
while prev < upper:
# Iterate each version from X.Y.0(prev) to X.Y.Z(upper)
yield prev
prev = prev.bump()
break
# Can't produce any wildcard versions
cur = prev.bump(1)
if cur <= upper: # X.Y+1.0 is still within the range
current_max = cls.PY_MAX_MINOR_VERSION[prev[:2]]
for z in range(cast(int, prev[2]), current_max + 1):
yield prev[:2].complete(z)
prev = prev.bump(0) if cur.is_py2 and cast(int, cur[1]) > cls.PY_MAX_MINOR_VERSION[cur[:1]] else cur
else: # Produce each version from X.Y.Z to X.Y.W
while prev < upper:
yield prev
prev = prev.bump()
break
@lru_cache
def is_superset(self, other: str | PySpecSet) -> bool:
if self.is_empty():
return False
this = _fix_py4k(self._logic)
if this.is_any():
return True
if isinstance(other, str):
other = type(self)(other)
return this & other._logic == other._logic
@lru_cache
def is_subset(self, other: str | PySpecSet) -> bool:
if self.is_empty():
return False
if isinstance(other, str):
other = type(self)(other)
that = _fix_py4k(other._logic)
if that.is_any():
return True
return self._logic & that == self._logic
def as_marker_string(self) -> str:
spec = self._logic
if spec.is_empty():
raise InvalidPyVersion("Impossible specifier")
if spec.is_any():
return ""
return _convert_spec(cast(VersionSpecifier, spec))
def _convert_spec(specifier: VersionSpecifier) -> str:
if isinstance(specifier, UnionSpecifier):
return " or ".join(_convert_spec(s) for s in specifier.ranges)
result: list[str] = []
excludes: list[str] = []
full_excludes: list[str] = []
for spec in sorted(specifier.to_specifierset(), key=attrgetter("version")):
op, version = spec.operator, spec.version
if len(version.split(".")) < 3:
key = "python_version"
else:
key = "python_full_version"
if version[-2:] == ".*":
version = version[:-2]
key = "python_version"
if op == "!=":
if key == "python_version":
excludes.append(version)
else:
full_excludes.append(version)
else:
result.append(f"{key}{op}{version!r}")
if excludes:
result.append("python_version not in {!r}".format(", ".join(sorted(excludes))))
if full_excludes:
result.append("python_full_version not in {!r}".format(", ".join(sorted(full_excludes))))
return " and ".join(result)
def _fix_py4k(spec: VersionSpecifier) -> VersionSpecifier:
"""If the upper bound is 4.0, replace it with unlimited."""
if isinstance(spec, UnionSpecifier):
*pre, last = spec.ranges
return UnionSpecifier([*pre, _fix_py4k(last)])
if isinstance(spec, RangeSpecifier) and spec.max == parse_version("4.0"):
return dataclasses.replace(spec, max=None, include_max=False)
return spec
| PySpecSet |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1839,
"end": 1930
} | class ____(metaclass=MetaclassE):
x = DescriptorE()
ClassE.x
ClassE().x
ClassE.y
| ClassE |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 18408,
"end": 21562
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| AriaTextAttention |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 9246,
"end": 9951
} | class ____:
def setup(self):
nrows = 10000
data = np.random.randn(nrows, 10)
arrays = np.tile(np.random.randn(3, nrows // 100), 100)
idx = MultiIndex.from_arrays(arrays)
self.df3 = DataFrame(data, index=idx)
self.df4 = DataFrame(data, index=np.random.randn(nrows))
self.df_tall = DataFrame(np.random.randn(nrows, 10))
self.df_wide = DataFrame(np.random.randn(10, nrows))
def time_html_repr_trunc_mi(self):
self.df3._repr_html_()
def time_html_repr_trunc_si(self):
self.df4._repr_html_()
def time_repr_tall(self):
repr(self.df_tall)
def time_frame_repr_wide(self):
repr(self.df_wide)
| Repr |
python | tiangolo__fastapi | tests/test_response_code_no_body.py | {
"start": 318,
"end": 3315
} | class ____(BaseModel):
errors: typing.List[Error]
@app.get(
"/a",
status_code=204,
response_class=JsonApiResponse,
responses={500: {"description": "Error", "model": JsonApiError}},
)
async def a():
pass
@app.get("/b", responses={204: {"description": "No Content"}})
async def b():
pass # pragma: no cover
client = TestClient(app)
def test_get_response():
response = client.get("/a")
assert response.status_code == 204, response.text
assert "content-length" not in response.headers
assert response.content == b""
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/a": {
"get": {
"responses": {
"500": {
"description": "Error",
"content": {
"application/vnd.api+json": {
"schema": {
"$ref": "#/components/schemas/JsonApiError"
}
}
},
},
"204": {"description": "Successful Response"},
},
"summary": "A",
"operationId": "a_a_get",
}
},
"/b": {
"get": {
"responses": {
"204": {"description": "No Content"},
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
},
"summary": "B",
"operationId": "b_b_get",
}
},
},
"components": {
"schemas": {
"Error": {
"title": "Error",
"required": ["status", "title"],
"type": "object",
"properties": {
"status": {"title": "Status", "type": "string"},
"title": {"title": "Title", "type": "string"},
},
},
"JsonApiError": {
"title": "JsonApiError",
"required": ["errors"],
"type": "object",
"properties": {
"errors": {
"title": "Errors",
"type": "array",
"items": {"$ref": "#/components/schemas/Error"},
}
},
},
}
},
}
| JsonApiError |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/groupby.py | {
"start": 22811,
"end": 25296
} | class ____(DefaultMethod):
"""Builder for default-to-pandas GroupBy aggregation functions."""
_groupby_cls = GroupBy
OBJECT_TYPE = "GroupBy"
@classmethod
def register(cls, func, **kwargs):
"""
Build default-to-pandas GroupBy aggregation function.
Parameters
----------
func : callable or str
Default aggregation function. If aggregation function is not specified
via groupby arguments, then `func` function is used.
**kwargs : kwargs
Additional arguments that will be passed to function builder.
Returns
-------
callable
Functiom that takes query compiler and defaults to pandas to do GroupBy
aggregation.
"""
return super().register(
cls._groupby_cls.build_groupby(func), fn_name=func.__name__, **kwargs
)
# This specifies a `pandas.DataFrameGroupBy` method to pass the `agg_func` to,
# it's based on `how` to apply it. Going by pandas documentation:
# 1. `.aggregate(func)` applies func row/column wise.
# 2. `.apply(func)` applies func to a DataFrames, holding a whole group (group-wise).
# 3. `.transform(func)` is the same as `.apply()` but also broadcast the `func`
# result to the group's original shape.
# 4. 'direct' mode means that the passed `func` has to be applied directly
# to the `pandas.DataFrameGroupBy` object.
_aggregation_methods_dict = {
"axis_wise": pandas.core.groupby.DataFrameGroupBy.aggregate,
"group_wise": pandas.core.groupby.DataFrameGroupBy.apply,
"transform": pandas.core.groupby.DataFrameGroupBy.transform,
"direct": lambda grp, func, *args, **kwargs: func(grp, *args, **kwargs),
}
@classmethod
def get_aggregation_method(cls, how):
"""
Return `pandas.DataFrameGroupBy` method that implements the passed `how` UDF applying strategy.
Parameters
----------
how : {"axis_wise", "group_wise", "transform"}
`how` parameter of the ``BaseQueryCompiler.groupby_agg``.
Returns
-------
callable(pandas.DataFrameGroupBy, callable, *args, **kwargs) -> [pandas.DataFrame | pandas.Series]
Notes
-----
Visit ``BaseQueryCompiler.groupby_agg`` doc-string for more information about `how` parameter.
"""
return cls._aggregation_methods_dict[how]
| GroupByDefault |
python | Lightning-AI__lightning | src/lightning/pytorch/demos/transformer.py | {
"start": 843,
"end": 2901
} | class ____(nn.Module):
def __init__(
self,
vocab_size: int = 33278, # default for WikiText2
ninp: int = 200,
nhead: int = 2,
nhid: int = 200,
nlayers: int = 2,
dropout: float = 0.2,
) -> None:
super().__init__()
self.pos_encoder = PositionalEncoding(ninp, dropout)
self.embedding = nn.Embedding(vocab_size, ninp)
self.transformer = nn.Transformer(
d_model=ninp,
nhead=nhead,
num_encoder_layers=nlayers,
num_decoder_layers=nlayers,
dim_feedforward=nhid,
dropout=dropout,
batch_first=True,
)
self.decoder = nn.Linear(ninp, vocab_size)
self.ninp = ninp
self.vocab_size = vocab_size
self.src_mask: Optional[Tensor] = None
def generate_square_subsequent_mask(self, size: int) -> Tensor:
"""Generate a square mask for the sequence to prevent future tokens from being seen."""
mask = torch.triu(torch.ones(size, size), diagonal=1)
mask = mask.float().masked_fill(mask == 1, float("-inf")).masked_fill(mask == 0, 0.0)
return mask
def forward(self, inputs: Tensor, target: Tensor, mask: Optional[Tensor] = None) -> Tensor:
_, t = inputs.shape
# Generate source mask to prevent future token leakage
if self.src_mask is None or self.src_mask.size(0) != t:
self.src_mask = self.generate_square_subsequent_mask(t).to(inputs.device)
# Generate target mask if not provided
if mask is None:
mask = self.generate_square_subsequent_mask(t).to(inputs.device)
src = self.pos_encoder(self.embedding(inputs) * math.sqrt(self.ninp))
target = self.pos_encoder(self.embedding(target) * math.sqrt(self.ninp))
output = self.transformer(src, target, tgt_mask=mask)
output = self.decoder(output)
output = F.log_softmax(output, dim=-1)
output = output.view(-1, self.vocab_size)
return output
| Transformer |
python | huggingface__transformers | src/transformers/models/roformer/modeling_roformer.py | {
"start": 29224,
"end": 34846
} | class ____(RoFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = RoFormerEmbeddings(config)
if config.embedding_size != config.hidden_size:
self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
self.encoder = RoFormerEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[BaseModelOutputWithPastAndCrossAttentions, tuple[torch.Tensor]]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
if hasattr(self, "embeddings_project"):
embedding_output = self.embeddings_project(embedding_output)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=sequence_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring
| RoFormerModel |
python | pytorch__pytorch | .ci/lumen_cli/tests/test_vllm.py | {
"start": 217,
"end": 3763
} | class ____(unittest.TestCase):
@patch(f"{_VLLM_BUILD_MODULE}.local_image_exists", return_value=True)
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=True)
@patch(
"cli.lib.common.envs_helper.env_path_optional",
side_effect=lambda name, default=None, resolve=True: {
"DOCKERFILE_PATH": Path("/abs/vllm/Dockerfile"),
"TORCH_WHEELS_PATH": Path("/abs/dist"),
"OUTPUT_DIR": Path("/abs/shared"),
}.get(name, Path(default) if default is not None else None),
)
@patch.dict(
os.environ,
{
"USE_TORCH_WHEEL": "1",
"USE_LOCAL_BASE_IMAGE": "1",
"USE_LOCAL_DOCKERFILE": "1",
"BASE_IMAGE": "my/image:tag",
"DOCKERFILE_PATH": "vllm/Dockerfile",
"TORCH_WHEELS_PATH": "dist",
"OUTPUT_DIR": "shared",
},
clear=True,
)
def test_params_success_normalizes_and_validates(
self, mock_env_path, mock_is_path, mock_local_img
):
params = vllm_build.VllmBuildParameters()
self.assertEqual(params.torch_whls_path, Path("/abs/dist"))
self.assertEqual(params.dockerfile_path, Path("/abs/vllm/Dockerfile"))
self.assertEqual(params.output_dir, Path("/abs/shared"))
self.assertEqual(params.base_image, "my/image:tag")
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False)
@patch.dict(
os.environ, {"USE_TORCH_WHEEL": "1", "TORCH_WHEELS_PATH": "dist"}, clear=True
)
def test_params_missing_torch_whls_raises(self, _is_path):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters(
use_local_base_image=False,
use_local_dockerfile=False,
)
err = cm.exception
self.assertIn("TORCH_WHEELS_PATH", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.local_image_exists", return_value=False)
@patch.dict(
os.environ, {"USE_LOCAL_BASE_IMAGE": "1", "BASE_IMAGE": "img:tag"}, clear=True
)
def test_params_missing_local_base_image_raises(self, _local_img):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters(
use_torch_whl=False,
use_local_dockerfile=False,
)
err = cm.exception
self.assertIn("BASE_IMAGE", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False)
@patch.dict(
os.environ,
{"USE_LOCAL_DOCKERFILE": "1", "DOCKERFILE_PATH": "Dockerfile"},
clear=True,
)
def test_params_missing_dockerfile_raises(self, _is_path):
with tempfile.TemporaryDirectory() as td:
os.chdir(td)
with self.assertRaises(ValueError) as cm:
vllm_build.VllmBuildParameters(
use_torch_whl=False,
use_local_base_image=False,
)
err = cm.exception
self.assertIn("DOCKERFILE_PATH", str(err))
@patch(f"{_VLLM_BUILD_MODULE}.is_path_exist", return_value=False)
@patch.dict(
os.environ,
{"OUTPUT_DIR": ""},
clear=True,
)
def test_params_missing_output_dir(self, _is_path):
with self.assertRaises(FileNotFoundError):
vllm_build.VllmBuildParameters()
| TestVllmBuildParameters |
python | yaml__pyyaml | lib/yaml/__init__.py | {
"start": 11507,
"end": 12316
} | class ____(metaclass=YAMLObjectMetaclass):
"""
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_loader = [Loader, FullLoader, UnsafeLoader]
yaml_dumper = Dumper
yaml_tag = None
yaml_flow_style = None
@classmethod
def from_yaml(cls, loader, node):
"""
Convert a representation node to a Python object.
"""
return loader.construct_yaml_object(node, cls)
@classmethod
def to_yaml(cls, dumper, data):
"""
Convert a Python object to a representation node.
"""
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
flow_style=cls.yaml_flow_style)
| YAMLObject |
python | python-attrs__attrs | src/attr/_version_info.py | {
"start": 205,
"end": 2222
} | class ____:
"""
A version object that can be compared to tuple of length 1--4:
>>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
True
>>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
True
>>> vi = attr.VersionInfo(19, 2, 0, "final")
>>> vi < (19, 1, 1)
False
>>> vi < (19,)
False
>>> vi == (19, 2,)
True
>>> vi == (19, 2, 1)
False
.. versionadded:: 19.2
"""
year = attrib(type=int)
minor = attrib(type=int)
micro = attrib(type=int)
releaselevel = attrib(type=str)
@classmethod
def _from_version_string(cls, s):
"""
Parse *s* and return a _VersionInfo.
"""
v = s.split(".")
if len(v) == 3:
v.append("final")
return cls(
year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
)
def _ensure_tuple(self, other):
"""
Ensure *other* is a tuple of a valid length.
Returns a possibly transformed *other* and ourselves as a tuple of
the same length as *other*.
"""
if self.__class__ is other.__class__:
other = astuple(other)
if not isinstance(other, tuple):
raise NotImplementedError
if not (1 <= len(other) <= 4):
raise NotImplementedError
return astuple(self)[: len(other)], other
def __eq__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
return us == them
def __lt__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
# Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
# have to do anything special with releaselevel for now.
return us < them
def __hash__(self):
return hash((self.year, self.minor, self.micro, self.releaselevel))
| VersionInfo |
python | huggingface__transformers | src/transformers/models/lightglue/modular_lightglue.py | {
"start": 1726,
"end": 8814
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LightGlueForKeypointMatching`]. It is used to
instantiate a LightGlue model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LightGlue
[ETH-CVG/lightglue_superpoint](https://huggingface.co/ETH-CVG/lightglue_superpoint) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
The config object or dictionary of the keypoint detector.
descriptor_dim (`int`, *optional*, defaults to 256):
The dimension of the descriptors.
num_hidden_layers (`int`, *optional*, defaults to 9):
The number of self and cross attention layers.
num_attention_heads (`int`, *optional*, defaults to 4):
The number of heads in the multi-head attention.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
depth_confidence (`float`, *optional*, defaults to 0.95):
The confidence threshold used to perform early stopping
width_confidence (`float`, *optional*, defaults to 0.99):
The confidence threshold used to prune points
filter_threshold (`float`, *optional*, defaults to 0.1):
The confidence threshold used to filter matches
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function to be used in the hidden layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attention_bias (`bool`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether to trust remote code when using other models than SuperPoint as keypoint detector.
Examples:
```python
>>> from transformers import LightGlueConfig, LightGlueForKeypointMatching
>>> # Initializing a LightGlue style configuration
>>> configuration = LightGlueConfig()
>>> # Initializing a model from the LightGlue style configuration
>>> model = LightGlueForKeypointMatching(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "lightglue"
sub_configs = {"keypoint_detector_config": AutoConfig}
def __init__(
self,
keypoint_detector_config: SuperPointConfig = None,
descriptor_dim: int = 256,
num_hidden_layers: int = 9,
num_attention_heads: int = 4,
num_key_value_heads=None,
depth_confidence: float = 0.95,
width_confidence: float = 0.99,
filter_threshold: float = 0.1,
initializer_range: float = 0.02,
hidden_act: str = "gelu",
attention_dropout=0.0,
attention_bias=True,
trust_remote_code: bool = False,
**kwargs,
):
# LightGlue can be used with other models than SuperPoint as keypoint detector
# We provide the trust_remote_code argument to allow the use of other models
# that are not registered in the CONFIG_MAPPING dictionary (for example DISK)
self.trust_remote_code = trust_remote_code
if descriptor_dim % num_attention_heads != 0:
raise ValueError("descriptor_dim % num_heads is different from zero")
self.descriptor_dim = descriptor_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.depth_confidence = depth_confidence
self.width_confidence = width_confidence
self.filter_threshold = filter_threshold
self.initializer_range = initializer_range
# Keypoint Detector is forced into eager attention mode because SuperPoint does not have Attention
# See https://github.com/huggingface/transformers/pull/31718#discussion_r2109733153
if isinstance(keypoint_detector_config, dict):
keypoint_detector_config["model_type"] = keypoint_detector_config.get("model_type", "superpoint")
if keypoint_detector_config["model_type"] not in CONFIG_MAPPING:
keypoint_detector_config = AutoConfig.from_pretrained(
keypoint_detector_config["_name_or_path"], trust_remote_code=self.trust_remote_code
)
else:
keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config["model_type"]](
**keypoint_detector_config, attn_implementation="eager"
)
if keypoint_detector_config is None:
keypoint_detector_config = CONFIG_MAPPING["superpoint"](attn_implementation="eager")
self.keypoint_detector_config = keypoint_detector_config
self.hidden_size = descriptor_dim
self.intermediate_size = descriptor_dim * 2
self.hidden_act = hidden_act
self.attention_dropout = attention_dropout
self.attention_bias = attention_bias
super().__init__(**kwargs)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of LightGlue keypoint matching models. Due to the nature of keypoint detection and matching,
the number of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the
batch of images, the maximum number of matches is set as the dimension of the matches and matching scores. The mask
tensor is used to indicate which values in the keypoints, matches, matching_scores and prune tensors are keypoint
matching information.
"""
)
| LightGlueConfig |
python | huggingface__transformers | src/transformers/models/smollm3/modeling_smollm3.py | {
"start": 15814,
"end": 19657
} | class ____(SmolLM3PreTrainedModel):
def __init__(self, config: SmolLM3Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[SmolLM3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = SmolLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = SmolLM3RotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
# The sliding window alternating layers are not always activated depending on the config
if self.has_sliding_layers:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
| SmolLM3Model |
python | doocs__leetcode | solution/2000-2099/2088.Count Fertile Pyramids in a Land/Solution.py | {
"start": 0,
"end": 865
} | class ____:
def countPyramids(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
f = [[0] * n for _ in range(m)]
ans = 0
for i in range(m - 1, -1, -1):
for j in range(n):
if grid[i][j] == 0:
f[i][j] = -1
elif not (i == m - 1 or j == 0 or j == n - 1):
f[i][j] = min(f[i + 1][j - 1], f[i + 1][j], f[i + 1][j + 1]) + 1
ans += f[i][j]
for i in range(m):
for j in range(n):
if grid[i][j] == 0:
f[i][j] = -1
elif i == 0 or j == 0 or j == n - 1:
f[i][j] = 0
else:
f[i][j] = min(f[i - 1][j - 1], f[i - 1][j], f[i - 1][j + 1]) + 1
ans += f[i][j]
return ans
| Solution |
python | ray-project__ray | rllib/core/rl_module/apis/q_net_api.py | {
"start": 157,
"end": 2045
} | class ____(abc.ABC):
"""An API to be implemented by RLModules used for (distributional) Q-learning.
RLModules implementing this API must override the `compute_q_values` and the
`compute_advantage_distribution` methods.
"""
@abc.abstractmethod
def compute_q_values(
self,
batch: Dict[str, TensorType],
) -> Dict[str, TensorType]:
"""Computes Q-values, given encoder, q-net and (optionally), advantage net.
Note, these can be accompanied by logits and probabilities
in case of distributional Q-learning, i.e. `self.num_atoms > 1`.
Args:
batch: The batch received in the forward pass.
Results:
A dictionary containing the Q-value predictions ("qf_preds")
and in case of distributional Q-learning - in addition to the Q-value
predictions ("qf_preds") - the support atoms ("atoms"), the Q-logits
("qf_logits"), and the probabilities ("qf_probs").
"""
def compute_advantage_distribution(
self,
batch: Dict[str, TensorType],
) -> Dict[str, TensorType]:
"""Computes the advantage distribution.
Note this distribution is identical to the Q-distribution in case no dueling
architecture is used.
Args:
batch: A dictionary containing a tensor with the outputs of the
forward pass of the Q-head or advantage stream head.
Returns:
A `dict` containing the support of the discrete distribution for
either Q-values or advantages (in case of a dueling architecture),
("atoms"), the logits per action and atom and the probabilities
of the discrete distribution (per action and atom of the support).
"""
# Return the Q-distribution by default.
return self.compute_q_values(batch)
| QNetAPI |
python | walkccc__LeetCode | solutions/1151. Minimum Swaps to Group All 1's Together/1151.py | {
"start": 0,
"end": 371
} | class ____:
def minSwaps(self, data: list[int]) -> int:
k = data.count(1)
ones = 0 # the number of ones in the window
maxOnes = 0 # the maximum number of ones in the window
for i, num in enumerate(data):
if i >= k and data[i - k]:
ones -= 1
if num:
ones += 1
maxOnes = max(maxOnes, ones)
return k - maxOnes
| Solution |
python | lepture__mistune | src/mistune/directives/include.py | {
"start": 273,
"end": 2343
} | class ____(DirectivePlugin):
def parse(
self, block: "BlockParser", m: Match[str], state: "BlockState"
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
source_file = state.env.get("__file__")
if not source_file:
return {"type": "block_error", "raw": "Missing source file"}
encoding = "utf-8"
options = self.parse_options(m)
if options:
attrs = dict(options)
if "encoding" in attrs:
encoding = attrs["encoding"]
else:
attrs = {}
relpath = self.parse_title(m)
dest = os.path.join(os.path.dirname(source_file), relpath)
dest = os.path.normpath(dest)
if dest == source_file:
return {
"type": "block_error",
"raw": "Could not include self: " + relpath,
}
if not os.path.isfile(dest):
return {
"type": "block_error",
"raw": "Could not find file: " + relpath,
}
with open(dest, "rb") as f:
content = f.read().decode(encoding)
ext = os.path.splitext(relpath)[1]
if ext in {".md", ".markdown", ".mkd"}:
new_state = block.state_cls()
new_state.env["__file__"] = dest
new_state.process(content)
block.parse(new_state)
return new_state.tokens
elif ext in {".html", ".xhtml", ".htm"}:
return {"type": "block_html", "raw": content}
attrs["filepath"] = dest
return {
"type": "include",
"raw": content,
"attrs": attrs,
}
def __call__(self, directive: BaseDirective, md: "Markdown") -> None:
directive.register("include", self.parse)
if md.renderer and md.renderer.NAME == "html":
md.renderer.register("include", render_html_include)
def render_html_include(renderer: "BaseRenderer", text: str, **attrs: Any) -> str:
return '<pre class="directive-include">\n' + text + "</pre>\n"
| Include |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/configurable.py | {
"start": 15079,
"end": 15347
} | class ____(str, enum.Enum):
"""String enum."""
_enums_for_spec: WeakValueDictionary[
ConfigurableFieldSingleOption | ConfigurableFieldMultiOption | ConfigurableField,
type[StrEnum],
] = WeakValueDictionary()
_enums_for_spec_lock = threading.Lock()
| StrEnum |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 27642,
"end": 28790
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = MraAttention(config)
self.add_cross_attention = config.add_cross_attention
self.intermediate = MraIntermediate(config)
self.output = MraOutput(config)
def forward(self, hidden_states, attention_mask=None):
self_attention_outputs = self.attention(hidden_states, attention_mask)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| MraLayer |
python | walkccc__LeetCode | solutions/2980. Check if Bitwise OR Has Trailing Zeros/2980.py | {
"start": 0,
"end": 121
} | class ____:
def hasTrailingZeros(self, nums: list[int]) -> bool:
return sum(num % 2 == 0 for num in nums) >= 2
| Solution |
python | walkccc__LeetCode | solutions/473. Matchsticks to Square/473.py | {
"start": 0,
"end": 648
} | class ____:
def makesquare(self, matchsticks: list[int]) -> bool:
if len(matchsticks) < 4:
return False
perimeter = sum(matchsticks)
if perimeter % 4 != 0:
return False
A = sorted(matchsticks)[::-1]
def dfs(selected: int, edges: list[int]) -> bool:
if selected == len(A):
return all(edge == edges[0] for edge in edges)
for i, edge in enumerate(edges):
if A[selected] > edge:
continue
edges[i] -= A[selected]
if dfs(selected + 1, edges):
return True
edges[i] += A[selected]
return False
return dfs(0, [perimeter // 4] * 4)
| Solution |
python | doocs__leetcode | solution/1100-1199/1140.Stone Game II/Solution.py | {
"start": 0,
"end": 388
} | class ____:
def stoneGameII(self, piles: List[int]) -> int:
@cache
def dfs(i, m):
if m * 2 >= n - i:
return s[n] - s[i]
return max(
s[n] - s[i] - dfs(i + x, max(m, x)) for x in range(1, m << 1 | 1)
)
n = len(piles)
s = list(accumulate(piles, initial=0))
return dfs(0, 1)
| Solution |
python | jina-ai__jina | tests/unit/serve/runtimes/worker/test_worker_runtime.py | {
"start": 5790,
"end": 11851
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
time.sleep(5.0)
@requests
def foo(self, docs, **kwargs):
return docs
@pytest.mark.timeout(10)
@pytest.mark.asyncio
@pytest.mark.skip
async def test_worker_runtime_slow_init_exec():
args = _generate_pod_args(['--uses', 'SlowInitExecutor'])
cancel_event = multiprocessing.Event()
runtime_thread = Process(
target=start_runtime,
args=(args, cancel_event),
daemon=True,
)
runtime_started = time.time()
runtime_thread.start()
# wait a bit to the worker runtime has a chance to finish some things, but not the Executor init (5 secs)
time.sleep(1.0)
# try to connect a TCP socket to the gRPC server
# this should only succeed after the Executor is ready, which should be after 5 seconds
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
connected = False
while not connected:
try:
s.connect((args.host, args.port[0]))
connected = True
except:
time.sleep(0.2)
# Executor sleeps 5 seconds, so at least 5 seconds need to have elapsed here
assert time.time() - runtime_started > 5.0
assert BaseServer.wait_for_ready_or_shutdown(
timeout=3.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
result = await send_request_async(
_create_test_data_message(), f'{args.host}:{args.port[0]}', timeout=1.0
)
assert len(result.docs) == 1
cancel_event.set()
runtime_thread.join()
assert not BaseServer.is_ready(f'{args.host}:{args.port[0]}')
@pytest.mark.asyncio
async def test_worker_runtime_reflection():
args = _generate_pod_args()
cancel_event = multiprocessing.Event()
runtime_thread = Process(
target=start_runtime,
args=(args, cancel_event),
daemon=True,
)
runtime_thread.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=3.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
async with grpc.aio.insecure_channel(f'{args.host}:{args.port[0]}') as channel:
service_names = await get_available_services(channel)
assert all(
service_name in service_names
for service_name in [
'jina.JinaRPC',
'jina.JinaDataRequestRPC',
'jina.JinaSingleDataRequestRPC',
]
)
cancel_event.set()
runtime_thread.join()
assert not BaseServer.is_ready(f'{args.host}:{args.port[0]}')
def _create_test_data_message(counter=0):
return list(request_generator('/', DocumentArray([Document(text=str(counter))])))[0]
@pytest.mark.asyncio
@pytest.mark.slow
@pytest.mark.timeout(5)
@pytest.mark.skip
async def test_decorator_monitoring(port_generator):
from jina import monitor
class DummyExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
self._proces(docs)
self.process_2(docs)
@monitor(name='metrics_name', documentation='metrics description')
def _proces(self, docs): ...
@monitor()
def process_2(self, docs): ...
port = port_generator()
args = _generate_pod_args(
['--monitoring', '--port-monitoring', str(port), '--uses', 'DummyExecutor']
)
cancel_event = multiprocessing.Event()
runtime_thread = Process(
target=start_runtime,
args=(args, cancel_event),
daemon=True,
)
runtime_thread.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
await send_request_async(
_create_test_data_message(), f'{args.host}:{args.port[0]}', timeout=1.0
)
resp = req.get(f'http://localhost:{port}/')
assert f'jina_metrics_name_count{{runtime_name="None"}} 1.0' in str(resp.content)
cancel_event.set()
runtime_thread.join()
assert not BaseServer.is_ready(f'{args.host}:{args.port[0]}')
@pytest.mark.asyncio
@pytest.mark.slow
@pytest.mark.timeout(5)
@pytest.mark.skip
async def test_decorator_monitoring(port_generator):
class DummyExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
with self.monitor(
name='process_seconds', documentation='process time in seconds '
):
self._proces(docs)
with self.monitor(
name='process_2_seconds', documentation='process 2 time in seconds '
):
self.process_2(docs)
def _proces(self, docs): ...
def process_2(self, docs): ...
port = port_generator()
args = _generate_pod_args(
['--monitoring', '--port-monitoring', str(port), '--uses', 'DummyExecutor']
)
cancel_event = multiprocessing.Event()
runtime_thread = Process(
target=start_runtime,
args=(args, cancel_event),
daemon=True,
)
runtime_thread.start()
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
assert BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'{args.host}:{args.port[0]}',
ready_or_shutdown_event=Event(),
)
await send_request_async(
_create_test_data_message(), f'{args.host}:{args.port[0]}', timeout=1.0
)
resp = req.get(f'http://localhost:{port}/')
assert f'jina_process_seconds_count{{runtime_name="None"}} 1.0' in str(resp.content)
cancel_event.set()
runtime_thread.join()
assert not BaseServer.is_ready(f'{args.host}:{args.port[0]}')
| SlowInitExecutor |
python | dagster-io__dagster | python_modules/libraries/dagster-omni/dagster_omni/objects.py | {
"start": 4444,
"end": 4960
} | class ____:
"""Serializable container object for recording the state of the Omni API at a given point in time.
Properties:
documents: list[OmniDocument]
users: list[OmniUser]
"""
documents: list[OmniDocument]
users: list[OmniUser]
@cached_property
def _users_by_id(self) -> dict[str, OmniUser]:
return {user.id: user for user in self.users}
def get_user(self, user_id: str) -> Optional[OmniUser]:
return self._users_by_id.get(user_id)
| OmniWorkspaceData |
python | pytest-dev__pytest | testing/test_unittest.py | {
"start": 45688,
"end": 50745
} | class ____:
"""
Make sure to show exceptions raised during class cleanup function (those registered
via addClassCleanup()).
See #11728.
"""
def test_class_cleanups_failure_in_setup(self, pytester: Pytester) -> None:
testpath = pytester.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
def cleanup(n):
raise Exception(f"fail {n}")
cls.addClassCleanup(cleanup, 2)
cls.addClassCleanup(cleanup, 1)
raise Exception("fail 0")
def test(self):
pass
"""
)
result = pytester.runpytest("-s", testpath)
result.assert_outcomes(passed=0, errors=1)
result.stdout.fnmatch_lines(
[
"*Unittest class cleanup errors *2 sub-exceptions*",
"*Exception: fail 1",
"*Exception: fail 2",
]
)
result.stdout.fnmatch_lines(
[
"* ERROR at setup of MyTestCase.test *",
"E * Exception: fail 0",
]
)
def test_class_cleanups_failure_in_teardown(self, pytester: Pytester) -> None:
testpath = pytester.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
def cleanup(n):
raise Exception(f"fail {n}")
cls.addClassCleanup(cleanup, 2)
cls.addClassCleanup(cleanup, 1)
def test(self):
pass
"""
)
result = pytester.runpytest("-s", testpath)
result.assert_outcomes(passed=1, errors=1)
result.stdout.fnmatch_lines(
[
"*Unittest class cleanup errors *2 sub-exceptions*",
"*Exception: fail 1",
"*Exception: fail 2",
]
)
def test_class_cleanup_1_failure_in_teardown(self, pytester: Pytester) -> None:
testpath = pytester.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
def cleanup(n):
raise Exception(f"fail {n}")
cls.addClassCleanup(cleanup, 1)
def test(self):
pass
"""
)
result = pytester.runpytest("-s", testpath)
result.assert_outcomes(passed=1, errors=1)
result.stdout.fnmatch_lines(
[
"*ERROR at teardown of MyTestCase.test*",
"*Exception: fail 1",
]
)
def test_traceback_pruning(pytester: Pytester) -> None:
"""Regression test for #9610 - doesn't crash during traceback pruning."""
pytester.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
def __init__(self, test_method):
unittest.TestCase.__init__(self, test_method)
class TestIt(MyTestCase):
@classmethod
def tearDownClass(cls) -> None:
assert False
def test_it(self):
pass
"""
)
reprec = pytester.inline_run()
passed, _skipped, failed = reprec.countoutcomes()
assert passed == 1
assert failed == 1
assert reprec.ret == 1
def test_raising_unittest_skiptest_during_collection(
pytester: Pytester,
) -> None:
pytester.makepyfile(
"""
import unittest
class TestIt(unittest.TestCase):
def test_it(self): pass
def test_it2(self): pass
raise unittest.SkipTest()
class TestIt2(unittest.TestCase):
def test_it(self): pass
def test_it2(self): pass
"""
)
reprec = pytester.inline_run()
passed, skipped, failed = reprec.countoutcomes()
assert passed == 0
# Unittest reports one fake test for a skipped module.
assert skipped == 1
assert failed == 0
assert reprec.ret == ExitCode.NO_TESTS_COLLECTED
def test_abstract_testcase_is_not_collected(pytester: Pytester) -> None:
"""Regression test for #12275."""
pytester.makepyfile(
"""
import abc
import unittest
class TestBase(unittest.TestCase, abc.ABC):
@abc.abstractmethod
def abstract1(self): pass
@abc.abstractmethod
def abstract2(self): pass
def test_it(self): pass
class TestPartial(TestBase):
def abstract1(self): pass
class TestConcrete(TestPartial):
def abstract2(self): pass
"""
)
result = pytester.runpytest()
assert result.ret == ExitCode.OK
result.assert_outcomes(passed=1)
| TestClassCleanupErrors |
python | kamyu104__LeetCode-Solutions | Python/maximum-nesting-depth-of-two-valid-parentheses-strings.py | {
"start": 265,
"end": 739
} | class ____(object):
def maxDepthAfterSplit(self, seq):
"""
:type seq: str
:rtype: List[int]
"""
A, B = 0, 0
result = [0]*len(seq)
for i, c in enumerate(seq):
point = 1 if c == '(' else -1
if (point == 1 and A <= B) or \
(point == -1 and A >= B):
A += point
else:
B += point
result[i] = 1
return result
| Solution2 |
python | python__mypy | mypy/types.py | {
"start": 16962,
"end": 17523
} | class ____(Type):
"""Required[T] or NotRequired[T]. Only usable at top-level of a TypedDict definition."""
def __init__(self, item: Type, *, required: bool) -> None:
super().__init__(line=item.line, column=item.column)
self.item = item
self.required = required
def __repr__(self) -> str:
if self.required:
return f"Required[{self.item}]"
else:
return f"NotRequired[{self.item}]"
def accept(self, visitor: TypeVisitor[T]) -> T:
return self.item.accept(visitor)
| RequiredType |
python | pypa__setuptools | setuptools/_vendor/tomli/_parser.py | {
"start": 6204,
"end": 7220
} | class ____:
def __init__(self) -> None:
# The parsed content of the TOML document
self.dict: dict[str, Any] = {}
def get_or_create_nest(
self,
key: Key,
*,
access_lists: bool = True,
) -> dict:
cont: Any = self.dict
for k in key:
if k not in cont:
cont[k] = {}
cont = cont[k]
if access_lists and isinstance(cont, list):
cont = cont[-1]
if not isinstance(cont, dict):
raise KeyError("There is no nest behind this key")
return cont
def append_nest_to_list(self, key: Key) -> None:
cont = self.get_or_create_nest(key[:-1])
last_key = key[-1]
if last_key in cont:
list_ = cont[last_key]
if not isinstance(list_, list):
raise KeyError("An object other than list found behind this key")
list_.append({})
else:
cont[last_key] = [{}]
| NestedDict |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_roman_numeral.py | {
"start": 525,
"end": 1661
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_roman_numeral"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
def matches_roman_numeral_regex(x):
return bool(re.match(ROMAN_NUMERAL_REGEX, str(x)))
return column.apply(lambda x: matches_roman_numeral_regex(x) if x else False)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidRomanNumeral |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_libsparse.py | {
"start": 17476,
"end": 19043
} | class ____:
@pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"])
def test_op(self, opname, cases, test_length):
xloc, xlen, yloc, ylen, _, _ = cases
sparse_op = getattr(splib, f"sparse_{opname}_float64")
python_op = getattr(operator, opname)
xindex = BlockIndex(test_length, xloc, xlen)
yindex = BlockIndex(test_length, yloc, ylen)
xdindex = xindex.to_int_index()
ydindex = yindex.to_int_index()
x = np.arange(xindex.npoints) * 10.0 + 1
y = np.arange(yindex.npoints) * 100.0 + 1
xfill = 0
yfill = 2
result_block_vals, rb_index, bfill = sparse_op(
x, xindex, xfill, y, yindex, yfill
)
result_int_vals, ri_index, ifill = sparse_op(
x, xdindex, xfill, y, ydindex, yfill
)
assert rb_index.to_int_index().equals(ri_index)
tm.assert_numpy_array_equal(result_block_vals, result_int_vals)
assert bfill == ifill
# check versus Series...
xseries = Series(x, xdindex.indices)
xseries = xseries.reindex(np.arange(test_length)).fillna(xfill)
yseries = Series(y, ydindex.indices)
yseries = yseries.reindex(np.arange(test_length)).fillna(yfill)
series_result = python_op(xseries, yseries)
series_result = series_result.reindex(ri_index.indices)
tm.assert_numpy_array_equal(result_block_vals, series_result.values)
tm.assert_numpy_array_equal(result_int_vals, series_result.values)
| TestSparseOperators |
python | django-extensions__django-extensions | django_extensions/db/models.py | {
"start": 246,
"end": 789
} | class ____(models.Model):
"""
TimeStampedModel
An abstract base class model that provides self-managed "created" and
"modified" fields.
"""
created = CreationDateTimeField(_("created"))
modified = ModificationDateTimeField(_("modified"))
def save(self, **kwargs):
self.update_modified = kwargs.pop(
"update_modified", getattr(self, "update_modified", True)
)
super().save(**kwargs)
class Meta:
get_latest_by = "modified"
abstract = True
| TimeStampedModel |
python | aio-libs__aiohttp | tests/test_web_exceptions.py | {
"start": 10398,
"end": 11572
} | class ____:
def test_ctor(self) -> None:
resp = web.HTTPRequestEntityTooLarge(
max_size=100,
actual_size=123,
headers={"X-Custom": "value"},
reason="Too large",
)
assert resp.text == (
"Maximum request body size 100 exceeded, actual body size 123"
)
compare: Mapping[str, str] = {"X-Custom": "value", "Content-Type": "text/plain"}
assert resp.headers == compare
assert resp.reason == "Too large"
assert resp.status == 413
def test_pickle(self) -> None:
resp = web.HTTPRequestEntityTooLarge(
100, actual_size=123, headers={"X-Custom": "value"}, reason="Too large"
)
resp.foo = "bar" # type: ignore[attr-defined]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(resp, proto)
resp2 = pickle.loads(pickled)
assert resp2.text == resp.text
assert resp2.headers == resp.headers
assert resp2.reason == "Too large"
assert resp2.status == 413
assert resp2.foo == "bar"
| TestHTTPRequestEntityTooLarge |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-klaviyo/components.py | {
"start": 1823,
"end": 4328
} | class ____(StateMigration, ABC):
"""
Updates old format state to new per partitioned format.
Partitions: [{archived: True}, {archived: False}]
Default built in airbyte cdk migration will recognise only top-level field cursor value(updated_at),
but for partition {archived: True} source should use cursor value from archived object.
Example input state:
{
"updated_at": "2020-10-10T00:00:00+00:00",
"archived": {
"updated_at": "2021-10-10T00:00:00+00:00"
}
}
Example output state:
{
"partition":{ "archived":"true" },
"cursor":{ "updated_at":"2021-10-10T00:00:00+00:00" }
}
{
"partition":{ "archived":"false" },
"cursor":{ "updated_at":"2020-10-10T00:00:00+00:00" }
}
"""
declarative_stream: DeclarativeStreamModel
config: Config
def __init__(self, declarative_stream: DeclarativeStreamModel, config: Config):
self._config = config
self.declarative_stream = declarative_stream
self._cursor = declarative_stream.incremental_sync
self._parameters = declarative_stream.parameters
self._cursor_field = InterpolatedString.create(self._cursor.cursor_field, parameters=self._parameters).eval(self._config)
def get_archived_cursor_value(self, stream_state: Mapping[str, Any]):
return stream_state.get("archived", {}).get(self._cursor.cursor_field, self._config.get("start_date", DEFAULT_START_DATE))
def get_not_archived_cursor_value(self, stream_state: Mapping[str, Any]):
return stream_state.get(self._cursor.cursor_field, self._config.get("start_date", DEFAULT_START_DATE))
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
return bool("states" not in stream_state and stream_state)
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
if not self.should_migrate(stream_state):
return stream_state
is_archived_updated_at = self.get_archived_cursor_value(stream_state)
is_not_archived_updated_at = self.get_not_archived_cursor_value(stream_state)
migrated_stream_state = {
"states": [
{"partition": ARCHIVED, "cursor": {self._cursor.cursor_field: is_archived_updated_at}},
{"partition": NOT_ARCHIVED, "cursor": {self._cursor.cursor_field: is_not_archived_updated_at}},
]
}
return migrated_stream_state
| ArchivedToPerPartitionStateMigration |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0071_add_env_var_privacy.py | {
"start": 100,
"end": 617
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0070_make_md5_field_nullable"),
]
operations = [
migrations.AddField(
model_name="environmentvariable",
name="public",
field=models.BooleanField(
null=True,
default=False,
help_text="Expose this environment variable in PR builds?",
verbose_name="Public",
),
),
]
| Migration |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/linear_unpack_fp16_test.py | {
"start": 585,
"end": 1508
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
# input to unpack operator must be what the output is for prepack operator
self.inputs = {
"input_one": torch.ops.quantized.linear_prepack_fp16(
torch.rand(
M, N, K, device=device, requires_grad=False, dtype=torch.float32
)
)
}
self.set_module_name("linear_unpack_fp16")
def forward(self, input_one):
return torch.ops.quantized.linear_unpack_fp16(input_one)
# The generated test names based on linear_unpack_fp16_short_configs will be in the following pattern:
# linear_unpack_fp16_M8_N16_K32_devicecpu
op_bench.generate_pt_test(
linear_unpack_fp16_long_configs + linear_unpack_fp16_short_configs,
LinearUnpackFP16Benchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| LinearUnpackFP16Benchmark |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6604,
"end": 6670
} | class ____(ResampleReduction):
how = "quantile"
| ResampleQuantile |
python | pytorch__pytorch | test/quantization/fx/test_numeric_suite_fx.py | {
"start": 9862,
"end": 30867
} | class ____(QuantizationTestCase):
@skipIfNoFBGEMM
def test_simple_mod(self):
m = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
expected_types = {
conv_name_0: ((nn.Conv2d, torch.ao.quantization.MinMaxObserver), (nnq.Conv2d, nnq.Conv2d)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fun(self):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = nn.Parameter(torch.empty(1, 4))
self.b = nn.Parameter(torch.zeros(1))
torch.nn.init.kaiming_uniform_(self.w, a=math.sqrt(5))
def forward(self, x):
return F.linear(x, self.w, self.b)
m = M().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear, toq.linear))
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_fusion(self):
m = LinearReluFunctional().eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(4, 4),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
linear_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.linear) + '_0'
expected_types = {
linear_name_0:
((F.linear, torch.ao.quantization.MinMaxObserver), (toq.linear_relu, toq.linear_relu)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_simple_mod_multi(self):
m = nn.Sequential(
nn.Sequential(
nn.Conv2d(1, 1, 1),
),
nn.Conv2d(1, 1, 1),
).eval()
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=(torch.randn(1, 1, 1, 1),))
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_simple_tensor_ops(self):
class M(nn.Module):
def forward(self, x, y):
z = x + y
return z
m = M().eval()
example_inputs = (torch.randn(1), torch.randn(1))
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
# assume success if no exceptions
results = get_matching_subgraph_pairs(mp, mq)
@skipIfNoFBGEMM
def test_matching_failure_node_count(self):
# verify that matching graphs with matching node types but
# different counts of matchable nodes fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Conv2d(1, 1, 1)).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_matching_failure_node_type(self):
# verify that matching graphs with non-matching node types fails
m1 = nn.Sequential(nn.Conv2d(1, 1, 1)).eval()
m2 = nn.Sequential(nn.Linear(1, 1)).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp1 = prepare_fx(m1, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
example_inputs = (torch.randn(1, 1),)
mp2 = prepare_fx(m2, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
with self.assertRaises(GraphMatchingException) as ex:
results = get_matching_subgraph_pairs(mp1, mp2)
@skipIfNoFBGEMM
def test_nodes_before_cat(self):
# verify that nodes before cat get matched
class M(nn.Module):
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
x2 = torch.cat([x1, y1])
return x2
m = M().eval()
example_inputs = (torch.randn(1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
cat_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.cat) + '_0'
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
expected_types = {
cat_name_0: ((torch.cat, torch.cat), (torch.cat, torch.cat)),
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_dict_return_type(self):
# verify that we can traverse up nodes which return dictionaries
class M(nn.Module):
def forward(self, x0):
x1 = torch.add(x0, 1.0)
y1 = torch.add(x0, 1.0)
z1 = torch.add(x0, 1.0)
a1 = {'x1': x1, 'y1': (y1,), 'z1': [{'key': (z1,)}]}
return a1
m = M().eval()
example_inputs = (torch.randn(1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_0'
add_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_1'
add_name_2 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.add) + '_2'
expected_types = {
add_name_0: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_1: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
add_name_2: ((torch.add, torch.ao.quantization.MinMaxObserver), (toq.add, toq.add)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
@skipIfNoFBGEMM
def test_nodes_with_equal_types_get_matched(self):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = torch.mul(x, x)
x = torch.sigmoid(x)
x = F.relu(x)
return x
m = M().eval()
# prevent conv2 from getting quantized, so we can test
# modules with equal types
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping().set_module_name("conv2", None)
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, qconfig_mapping, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
conv_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_0'
conv_name_1 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, nn.Conv2d) + '_1'
mul_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.mul) + '_0'
relu_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.relu) + '_0'
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
# all of these should be matched
expected_types = {
conv_name_1:
((nn.Conv2d, torch.ao.quantization.HistogramObserver), (nnq.Conv2d, nnq.Conv2d)),
conv_name_0:
((nn.Conv2d, torch.ao.quantization.HistogramObserver), (nn.Conv2d, nn.Conv2d)),
mul_name_0: ((torch.mul, torch.ao.quantization.HistogramObserver), (toq.mul, toq.mul)),
relu_name_0: ((F.relu, torch.ao.quantization.FixedQParamsObserver), (F.relu, F.relu)),
sigmoid_name_0:
((torch.sigmoid, torch.ao.quantization.FixedQParamsObserver), (torch.sigmoid, torch.sigmoid)),
}
self.assert_types_for_matched_subgraph_pairs(results, expected_types, mp, mq)
def test_methods(self):
"""
Verify that graph matching works on methods
"""
class M(nn.Module):
def forward(self, x):
x = x.sigmoid()
return x
m1 = M().eval()
m2 = M().eval()
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1),)
m1p = prepare_fx(m1, qconfig_mapping, example_inputs=example_inputs)
m2p = prepare_fx(m2, qconfig_mapping, example_inputs=example_inputs)
results = get_matching_subgraph_pairs(m1p, m2p)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
sigmoid_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, torch.sigmoid) + '_0'
expected_types = {
sigmoid_name_0:
(('sigmoid', torch.ao.quantization.FixedQParamsObserver), ('sigmoid', torch.ao.quantization.FixedQParamsObserver)),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1p, m2p)
def test_op_relationship_mapping(self):
"""
Tests that the mapping of op relationships is complete.
"""
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# 1. check static quant module mappings
static_quant_mod_mappings = get_default_static_quant_module_mappings()
for fp32_type, int8_type in static_quant_mod_mappings.items():
# skip quants and dequants, for the purposes of Numerical Suite
types_to_skip = (
torch.ao.quantization.QuantStub,
torch.ao.quantization.DeQuantStub,
nnq.FloatFunctional,
# the ConvTranspose3d swap is not implemented in FX Graph
# mode quantization yet
nn.ConvTranspose3d,
# the GroupNorm swap is not implemented in FX Graph
# mode quantization yet
nn.GroupNorm,
# nnq.ReLU6 is no longer swapped, because nn.ReLU6 can
# take quantized inputs
nn.ReLU6,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 2. check static quant op mappings
static_quant_fun_mappings = get_default_float_to_quantized_operator_mappings()
for fp32_type, int8_type in static_quant_fun_mappings.items():
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 3. check dynamic quant mappings
dynamic_quant_mappings = get_default_dynamic_quant_module_mappings()
for fp32_type, int8_type in dynamic_quant_mappings.items():
# TODO(future PR): enable correct weight extraction for these
# and remove from this list.
types_to_skip = (
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
)
if fp32_type in types_to_skip:
continue
# verify relatedness
in_type_a_related_to_b = \
(fp32_type, int8_type) in type_a_related_to_b
self.assertTrue(
in_type_a_related_to_b,
f"{fp32_type} and {int8_type} need a relationship mapping")
# 4. go through the ops mapped to each QuantizeHandler type, and verify
# correctness.
def _op_in_base_sets_of_related_ops(op):
for ops in base_name_to_sets_of_related_ops.values():
if op in ops:
return True
return False
unmatchable_types_map = get_unmatchable_types_map()
FUNS_UNMATCHABLE = unmatchable_types_map['funs_unmatchable']
MODS_UNMATCHABLE = unmatchable_types_map['mods_unmatchable']
METHS_UNMATCHABLE = unmatchable_types_map['meths_unmatchable']
def _op_is_unmatchable(op):
return (
op in FUNS_UNMATCHABLE or
op in MODS_UNMATCHABLE or
op in METHS_UNMATCHABLE
)
default_quant_patterns = get_all_quant_patterns()
for pattern, qhandler_cls in default_quant_patterns.items():
base_op = None
if isinstance(pattern, tuple):
base_op = pattern[-1]
elif isinstance(pattern, str):
base_op = pattern
else:
base_op = pattern
qhandler_cls_all_ops_quantizeable = [
qh.CatQuantizeHandler,
qh.ConvReluQuantizeHandler,
qh.LinearReLUQuantizeHandler,
qh.BatchNormQuantizeHandler,
qh.EmbeddingQuantizeHandler,
qh.RNNDynamicQuantizeHandler,
]
qhandler_cls_quant_op_same_signature = [
qh.FixedQParamsOpQuantizeHandler,
qh.CopyNodeQuantizeHandler,
qh.GeneralTensorShapeOpQuantizeHandler,
]
if qhandler_cls == qh.BinaryOpQuantizeHandler:
# these ops do not have quantized equivalents
ops_to_skip = [
torch.bmm,
torch.div,
torch.sub,
operator.truediv,
operator.sub
]
if base_op in ops_to_skip:
continue
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls == qh.RNNDynamicQuantizeHandler:
# TODO(future PR): add support for all classes in
# RNNDynamicQuantizeHandler
pass
elif qhandler_cls == qh.DefaultNodeQuantizeHandler:
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
elif qhandler_cls in qhandler_cls_quant_op_same_signature:
# these ops use the same op signature for fp32 and quantized
# tensors
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op) or
_op_is_unmatchable(base_op),
f"{base_op} not in sets of related ops or unmatchable")
elif qhandler_cls in qhandler_cls_all_ops_quantizeable:
self.assertTrue(
_op_in_base_sets_of_related_ops(base_op),
f"{base_op} not in sets of related ops")
else:
# torch.sum does not have quantized equivalents
if base_op in [
torch.sum,
nn.GRUCell,
nn.GRU,
nn.LSTMCell,
nn.RNNCell,
]:
continue
if isinstance(base_op, tuple):
# skip fusion patterns
continue
# didn't match explicit quantize handler class, we can check if the
# operator is in the related op set directly
if not (_op_in_base_sets_of_related_ops(base_op) or _op_is_unmatchable(base_op)):
raise AssertionError(
f"handling for {qhandler_cls} for op {base_op} not implemented")
@skipIfNoFBGEMM
def test_user_defined_function(self):
"""
Verify that graph matching works on user defined functions
"""
class M1(nn.Module):
def forward(self, x):
x = F.hardswish(x)
return x
class M2(nn.Module):
def forward(self, x):
x = _wrapped_hardswish(x)
return x
qconfig_mapping = torch.ao.quantization.get_default_qconfig_mapping()
example_inputs = (torch.randn(1, 1, 1, 1),)
m1 = prepare_fx(M1().eval(), qconfig_mapping, example_inputs=example_inputs)
m2 = prepare_fx(M2().eval(), qconfig_mapping, example_inputs=example_inputs)
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops, _wrapped_hardswish, F.hardswish)
results = get_matching_subgraph_pairs(
m1, m2,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops)
hardswish_name_0 = 'base_op_' + get_base_name_for_op(
base_name_to_sets_of_related_ops, F.hardswish) + '_0'
expected_types = {
hardswish_name_0:
((F.hardswish, torch.ao.quantization.HistogramObserver), (_wrapped_hardswish, _wrapped_hardswish)),
}
self.assert_types_for_matched_subgraph_pairs(
results, expected_types, m1, m2)
@skipIfNoFBGEMM
def test_results_order(self):
m = nn.Sequential(
nn.Conv2d(1, 1, 1),
nn.Linear(1, 1),
).eval()
example_inputs = (torch.randn(1, 1, 1, 1),)
mp = prepare_fx(m, {'': torch.ao.quantization.default_qconfig}, example_inputs=example_inputs)
mp_copy = copy.deepcopy(mp)
mq = convert_fx(mp_copy)
results = get_matching_subgraph_pairs(mp, mq)
self.assertTrue(len(results) == 2)
results_iter = iter(results.items())
_, (subgraph_a_0, subgraph_b_0) = next(results_iter)
self.assertTrue(subgraph_a_0.start_node.name == '_0' and
subgraph_b_0.start_node.name == '_0')
_, (subgraph_a_1, subgraph_b_1) = next(results_iter)
self.assertTrue(subgraph_a_1.start_node.name == '_1' and
subgraph_b_1.start_node.name == '_1')
| TestFXGraphMatcher |
python | catalyst-team__catalyst | catalyst/contrib/data/dataset.py | {
"start": 4149,
"end": 6032
} | class ____(ListDataset):
"""
Dataset that derives features and targets from samples filesystem paths.
Examples:
>>> label_fn = lambda x: x.split("_")[0]
>>> dataset = PathsDataset(
>>> filenames=Path("/path/to/images/").glob("*.jpg"),
>>> label_fn=label_fn,
>>> open_fn=open_fn,
>>> )
"""
def __init__(
self,
filenames: List[_Path],
open_fn: Callable[[dict], dict],
label_fn: Callable[[_Path], Any],
features_key: str = "features",
target_key: str = "targets",
**list_dataset_params
):
"""
Args:
filenames: list of file paths that store information
about your dataset samples; it could be images, texts or
any other files in general.
open_fn: function, that can open your
annotations dict and
transfer it to data, needed by your network
(for example open image by path, or tokenize read string)
label_fn: function, that can extract target
value from sample path
(for example, your sample could be an image file like
``/path/to/your/image_1.png`` where the target is encoded as
a part of file path)
features_key: key to use to store sample features
target_key: key to use to store target label
list_dataset_params: base class initialization
parameters.
"""
list_data = [
{features_key: filename, target_key: label_fn(filename)}
for filename in filenames
]
super().__init__(list_data=list_data, open_fn=open_fn, **list_dataset_params)
__all__ = [
"ListDataset",
"MergeDataset",
"NumpyDataset",
"PathsDataset",
]
| PathsDataset |
python | pypa__warehouse | warehouse/manage/views/organizations.py | {
"start": 62433,
"end": 73882
} | class ____:
def __init__(self, organization, request):
self.organization = organization
self.request = request
self.metrics = self.request.metrics
self.project_service = self.request.find_service(IProjectService)
self.pending_github_publisher_form = PendingGitHubPublisherForm(
self.request.POST,
api_token=self.request.registry.settings.get("github.token"),
route_url=self.request.route_url,
check_project_name=self.project_service.check_project_name,
user=request.user, # Still need to pass user for form validation
)
_gl_issuers = GitLabPublisher.get_available_issuer_urls(
organization=organization
)
self.pending_gitlab_publisher_form = PendingGitLabPublisherForm(
self.request.POST,
route_url=self.request.route_url,
check_project_name=self.project_service.check_project_name,
user=request.user,
issuer_url_choices=_gl_issuers,
)
self.pending_google_publisher_form = PendingGooglePublisherForm(
self.request.POST,
route_url=self.request.route_url,
check_project_name=self.project_service.check_project_name,
user=request.user,
)
self.pending_activestate_publisher_form = PendingActiveStatePublisherForm(
self.request.POST,
route_url=self.request.route_url,
check_project_name=self.project_service.check_project_name,
user=request.user,
)
@property
def default_response(self):
# Get pending publishers owned by this organization
pending_oidc_publishers = self.organization.pending_oidc_publishers
return {
"organization": self.organization,
"pending_github_publisher_form": self.pending_github_publisher_form,
"pending_gitlab_publisher_form": self.pending_gitlab_publisher_form,
"pending_google_publisher_form": self.pending_google_publisher_form,
"pending_activestate_publisher_form": self.pending_activestate_publisher_form, # noqa: E501
"pending_oidc_publishers": pending_oidc_publishers,
"disabled": {
"GitHub": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GITHUB_OIDC
),
"GitLab": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GITLAB_OIDC
),
"Google": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_GOOGLE_OIDC
),
"ActiveState": self.request.flags.disallow_oidc(
AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC
),
},
}
@view_config(request_method="GET")
def manage_organization_publishing(self):
if self.request.flags.disallow_oidc():
self.request.session.flash(
self.request._(
"Trusted publishing is temporarily disabled. "
"See https://pypi.org/help#admin-intervention for details."
),
queue="error",
)
return self.default_response
return self.default_response
def _add_pending_oidc_publisher(
self,
publisher_name,
publisher_class,
admin_flag,
form,
make_pending_publisher,
make_existence_filters,
):
"""Common logic for adding organization-level pending OIDC publishers."""
# Check admin flags
if self.request.flags.disallow_oidc(admin_flag):
self.request.session.flash(
self.request._(
f"{publisher_name}-based trusted publishing is temporarily "
"disabled. See https://pypi.org/help#admin-intervention for "
"details."
),
queue="error",
)
return self.default_response
self.metrics.increment(
"warehouse.oidc.add_pending_publisher.attempt",
tags=[f"publisher:{publisher_name}", "organization:true"],
)
# Validate form
if not form.validate():
self.request.session.flash(
self.request._("The trusted publisher could not be registered"),
queue="error",
)
return self.default_response
# Check if publisher already exists
publisher_already_exists = (
self.request.db.query(publisher_class)
.filter_by(**make_existence_filters(form))
.first()
is not None
)
if publisher_already_exists:
self.request.session.flash(
self.request._(
"This publisher has already been registered in your organization. "
"See your existing pending publishers below."
),
queue="error",
)
return self.default_response
# Create pending publisher associated with organization
pending_publisher = make_pending_publisher(self.request, form)
try:
self.request.db.add(pending_publisher)
self.request.db.flush() # To get the new ID
except UniqueViolation:
# Double-post protection
return HTTPSeeOther(self.request.path)
# Record event on organization
self.organization.record_event(
tag=EventTag.Organization.PendingOIDCPublisherAdded,
request=self.request,
additional={
"project": pending_publisher.project_name,
"publisher": pending_publisher.publisher_name,
"id": str(pending_publisher.id),
"specifier": str(pending_publisher),
"url": pending_publisher.publisher_url(),
"submitted_by": self.request.user.username,
},
)
self.request.session.flash(
self.request._(
"Registered a new pending publisher to create "
f"the project '{pending_publisher.project_name}' "
f"owned by the '{self.organization.name}' organization."
),
queue="success",
)
self.metrics.increment(
"warehouse.oidc.add_pending_publisher.ok",
tags=[f"publisher:{publisher_name}", "organization:true"],
)
return HTTPSeeOther(self.request.path)
@view_config(
request_method="POST", request_param=PendingGitHubPublisherForm.__params__
)
def add_pending_github_oidc_publisher(self):
form = self.pending_github_publisher_form
return self._add_pending_oidc_publisher(
publisher_name="GitHub",
publisher_class=PendingGitHubPublisher,
admin_flag=AdminFlagValue.DISALLOW_GITHUB_OIDC,
form=form,
make_pending_publisher=lambda request, form: PendingGitHubPublisher(
project_name=form.project_name.data,
added_by=request.user,
repository_name=form.repository.data,
repository_owner=form.normalized_owner,
repository_owner_id=form.owner_id,
workflow_filename=form.workflow_filename.data,
environment=form.normalized_environment,
organization_id=self.organization.id,
),
make_existence_filters=lambda form: dict(
project_name=form.project_name.data,
repository_name=form.repository.data,
repository_owner=form.normalized_owner,
workflow_filename=form.workflow_filename.data,
environment=form.normalized_environment,
),
)
@view_config(
request_method="POST", request_param=PendingGitLabPublisherForm.__params__
)
def add_pending_gitlab_oidc_publisher(self):
form = self.pending_gitlab_publisher_form
return self._add_pending_oidc_publisher(
publisher_name="GitLab",
publisher_class=PendingGitLabPublisher,
admin_flag=AdminFlagValue.DISALLOW_GITLAB_OIDC,
form=form,
make_pending_publisher=lambda request, form: PendingGitLabPublisher(
project_name=form.project_name.data,
added_by=request.user,
namespace=form.namespace.data,
project=form.project.data,
workflow_filepath=form.workflow_filepath.data,
environment=form.environment.data,
issuer_url=form.issuer_url.data,
organization_id=self.organization.id,
),
make_existence_filters=lambda form: dict(
project_name=form.project_name.data,
namespace=form.namespace.data,
project=form.project.data,
workflow_filepath=form.workflow_filepath.data,
environment=form.environment.data,
issuer_url=form.issuer_url.data,
),
)
@view_config(
request_method="POST", request_param=PendingGooglePublisherForm.__params__
)
def add_pending_google_oidc_publisher(self):
form = self.pending_google_publisher_form
return self._add_pending_oidc_publisher(
publisher_name="Google",
publisher_class=PendingGooglePublisher,
admin_flag=AdminFlagValue.DISALLOW_GOOGLE_OIDC,
form=form,
make_pending_publisher=lambda request, form: PendingGooglePublisher(
project_name=form.project_name.data,
added_by=request.user,
email=form.email.data,
sub=form.sub.data,
organization_id=self.organization.id,
),
make_existence_filters=lambda form: dict(
project_name=form.project_name.data,
email=form.email.data,
sub=form.sub.data,
),
)
@view_config(
request_method="POST", request_param=PendingActiveStatePublisherForm.__params__
)
def add_pending_activestate_oidc_publisher(self):
form = self.pending_activestate_publisher_form
return self._add_pending_oidc_publisher(
publisher_name="ActiveState",
publisher_class=PendingActiveStatePublisher,
admin_flag=AdminFlagValue.DISALLOW_ACTIVESTATE_OIDC,
form=form,
make_pending_publisher=lambda request, form: PendingActiveStatePublisher(
project_name=form.project_name.data,
added_by=request.user,
organization=form.organization.data,
activestate_project_name=form.project.data,
actor=form.actor.data,
actor_id=form.actor_id,
organization_id=self.organization.id,
),
make_existence_filters=lambda form: dict(
project_name=form.project_name.data,
organization=form.organization.data,
activestate_project_name=form.project.data,
actor=form.actor.data,
actor_id=form.actor_id,
),
)
| ManageOrganizationPublishingViews |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 14912,
"end": 17036
} | class ____(CommonTestMixin, TestCase):
# when returning a list of strings a shortcut is employed by the server:
# it calculates the content-length and joins all the chunks before sending
validator = None
last_environ = None
def _check_environ(self, input_terminated=True):
if input_terminated:
self.assertTrue(self.last_environ.get('wsgi.input_terminated'))
else:
self.assertFalse(self.last_environ['wsgi.input_terminated'])
def application(self, env, start_response):
self.last_environ = env
path = env['PATH_INFO']
if path == '/':
start_response('200 OK', [('Content-Type', 'text/plain')])
return [b'hello ', b'world']
if path == '/websocket':
write = start_response('101 Switching Protocols',
[('Content-Type', 'text/plain'),
# Con:close is to make our simple client
# happy; otherwise it wants to read data from the
# body thot's being kept open.
('Connection', 'close')])
write(b'') # Trigger finalizing the headers now.
return [b'upgrading to', b'websocket']
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return [b'not ', b'found']
def test_basic(self):
response, dne_response = super(TestNoChunks, self).test_basic()
self._check_environ()
self.assertFalse(response.chunks)
response.assertHeader('Content-Length', '11')
if dne_response is not None:
self.assertFalse(dne_response.chunks)
dne_response.assertHeader('Content-Length', '9')
def test_dne(self):
with self.makefile() as fd:
fd.write(self.format_request(path='/notexist'))
response = read_http(fd, code=404, reason='Not Found', body='not found')
self.assertFalse(response.chunks)
self._check_environ()
response.assertHeader('Content-Length', '9')
| TestNoChunks |
python | huggingface__transformers | src/transformers/models/gemma3/image_processing_gemma3_fast.py | {
"start": 1295,
"end": 10268
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = True
do_convert_rgb = True
do_resize = True
do_rescale = True
do_normalize = True
do_pan_and_scan = None
pan_and_scan_min_crop_size = None
pan_and_scan_max_num_crops = None
pan_and_scan_min_ratio_to_activate = None
valid_kwargs = Gemma3ImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Gemma3ImageProcessorKwargs]):
super().__init__(**kwargs)
def pan_and_scan_batched(
self,
images: "torch.Tensor",
pan_and_scan_min_crop_size: int,
pan_and_scan_max_num_crops: int,
pan_and_scan_min_ratio_to_activate: float,
):
"""
Pan and Scan an image, by cropping into smaller images when the aspect ratio exceeds
minimum allowed ratio.
Args:
image (`torch.Tensor`):
Image to resize.
pan_and_scan_min_crop_size (`int`, *optional*):
Minimum size of each crop in pan and scan.
pan_and_scan_max_num_crops (`int`, *optional*):
Maximum number of crops per image in pan and scan.
pan_and_scan_min_ratio_to_activate (`float`, *optional*):
Minimum aspect ratio to activate pan and scan.
"""
height, width = images.shape[-2:]
# Square or landscape image.
if width >= height:
# Only apply PaS if the image is sufficiently exaggerated
if width / height < pan_and_scan_min_ratio_to_activate:
return []
# Select ideal number of crops close to the image aspect ratio and such that crop_size > min_crop_size.
num_crops_w = int(math.floor(width / height + 0.5)) # Half round up rounding.
num_crops_w = min(int(math.floor(width / pan_and_scan_min_crop_size)), num_crops_w)
# Make sure the number of crops is in range [2, pan_and_scan_max_num_crops].
num_crops_w = max(2, num_crops_w)
num_crops_w = min(pan_and_scan_max_num_crops, num_crops_w)
num_crops_h = 1
# Portrait image.
else:
# Only apply PaS if the image is sufficiently exaggerated
if height / width < pan_and_scan_min_ratio_to_activate:
return []
# Select ideal number of crops close to the image aspect ratio and such that crop_size > min_crop_size.
num_crops_h = int(math.floor(height / width + 0.5))
num_crops_h = min(int(math.floor(height / pan_and_scan_min_crop_size)), num_crops_h)
# Make sure the number of crops is in range [2, pan_and_scan_max_num_crops].
num_crops_h = max(2, num_crops_h)
num_crops_h = min(pan_and_scan_max_num_crops, num_crops_h)
num_crops_w = 1
crop_size_w = int(math.ceil(width / num_crops_w))
crop_size_h = int(math.ceil(height / num_crops_h))
# Don't apply PaS if crop size is too small.
if min(crop_size_w, crop_size_h) < pan_and_scan_min_crop_size:
return []
crop_positions_w = [crop_size_w * i for i in range(num_crops_w)]
crop_positions_h = [crop_size_h * i for i in range(num_crops_h)]
return [
images[..., pos_h : pos_h + crop_size_h, pos_w : pos_w + crop_size_w]
for pos_h, pos_w in itertools.product(crop_positions_h, crop_positions_w)
]
def _process_images_for_pan_and_scan(
self,
images: list["torch.Tensor"],
do_pan_and_scan: bool,
pan_and_scan_min_crop_size: int,
pan_and_scan_max_num_crops: int,
pan_and_scan_min_ratio_to_activate: float,
):
pas_images = self.pan_and_scan_batched(
images=images,
pan_and_scan_min_crop_size=pan_and_scan_min_crop_size,
pan_and_scan_max_num_crops=pan_and_scan_max_num_crops,
pan_and_scan_min_ratio_to_activate=pan_and_scan_min_ratio_to_activate,
)
num_crops = [len(pas_images) for _ in images]
return pas_images, num_crops
@auto_docstring
def preprocess(
self,
images: ImageInput,
**kwargs: Unpack[Gemma3ImageProcessorKwargs],
) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list[list["torch.Tensor"]],
do_resize: bool,
size: SizeDict,
do_pan_and_scan: Optional[bool],
pan_and_scan_min_crop_size: Optional[int],
pan_and_scan_max_num_crops: Optional[int],
pan_and_scan_min_ratio_to_activate: Optional[float],
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched processing
processed_images_grouped = {}
num_crops_grouped = {}
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
for shape_images, stacked_images in grouped_images.items():
if do_pan_and_scan:
pas_images, num_crops = self._process_images_for_pan_and_scan(
images=stacked_images,
do_pan_and_scan=do_pan_and_scan,
pan_and_scan_min_crop_size=pan_and_scan_min_crop_size,
pan_and_scan_max_num_crops=pan_and_scan_max_num_crops,
pan_and_scan_min_ratio_to_activate=pan_and_scan_min_ratio_to_activate,
)
# Add the thumbnails to the image patches
stacked_images = [stacked_images] + pas_images
# Group images by size for batched resizing (this will typically group thumbnails together and cropped patches together)
processed_image_patches_grouped = {}
grouped_image_patches, grouped_image_patches_index = group_images_by_shape(
stacked_images, disable_grouping=disable_grouping
)
for shape, stacked_image_patches in grouped_image_patches.items():
stacked_image_patches = self.resize(
image=stacked_image_patches,
size=size,
interpolation=interpolation,
)
processed_image_patches_grouped[shape] = stacked_image_patches
processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index)
# Transpose to have the thumbnails with their corresponding patches
stacked_images = torch.stack(processed_image_patches, dim=0).transpose(0, 1).contiguous()
else:
num_crops = [0 for _ in stacked_images]
if do_resize:
stacked_images = self.resize(
image=stacked_images,
size=size,
interpolation=interpolation,
)
num_crops_grouped[shape_images] = num_crops
processed_images_grouped[shape_images] = stacked_images
resized_images = reorder_images(processed_images_grouped, grouped_images_index)
# If pan and scan is enabled, we need to flatten the list of images
if do_pan_and_scan:
resized_images = [image for images_list in resized_images for image in images_list]
num_crops = reorder_images(num_crops_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(
data={"pixel_values": processed_images, "num_crops": num_crops}, tensor_type=return_tensors
)
__all__ = ["Gemma3ImageProcessorFast"]
| Gemma3ImageProcessorFast |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 32635,
"end": 34488
} | class ____(SphericalBesselBase):
r"""
Spherical Bessel function of the second kind.
Explanation
===========
This function is another solution to the spherical Bessel equation, and
linearly independent from $j_n$. It can be defined as
.. math ::
y_\nu(z) = \sqrt{\frac{\pi}{2z}} Y_{\nu + \frac{1}{2}}(z),
where $Y_\nu(z)$ is the Bessel function of the second kind.
For integral orders $n$, $y_n$ is calculated using the formula:
.. math:: y_n(z) = (-1)^{n+1} j_{-n-1}(z)
Examples
========
>>> from sympy import Symbol, yn, sin, cos, expand_func, besselj, bessely
>>> z = Symbol("z")
>>> nu = Symbol("nu", integer=True)
>>> print(expand_func(yn(0, z)))
-cos(z)/z
>>> expand_func(yn(1, z)) == -cos(z)/z**2-sin(z)/z
True
>>> yn(nu, z).rewrite(besselj)
(-1)**(nu + 1)*sqrt(2)*sqrt(pi)*sqrt(1/z)*besselj(-nu - 1/2, z)/2
>>> yn(nu, z).rewrite(bessely)
sqrt(2)*sqrt(pi)*sqrt(1/z)*bessely(nu + 1/2, z)/2
>>> yn(2, 5.2+0.3j).evalf(20)
0.18525034196069722536 + 0.014895573969924817587*I
See Also
========
besselj, bessely, besselk, jn
References
==========
.. [1] https://dlmf.nist.gov/10.47
"""
@assume_integer_order
def _eval_rewrite_as_besselj(self, nu, z, **kwargs):
return S.NegativeOne**(nu+1) * sqrt(pi/(2*z)) * besselj(-nu - S.Half, z)
@assume_integer_order
def _eval_rewrite_as_bessely(self, nu, z, **kwargs):
return sqrt(pi/(2*z)) * bessely(nu + S.Half, z)
def _eval_rewrite_as_jn(self, nu, z, **kwargs):
return S.NegativeOne**(nu + 1) * jn(-nu - 1, z)
def _expand(self, **hints):
return _yn(self.order, self.argument)
def _eval_evalf(self, prec):
if self.order.is_Integer:
return self.rewrite(bessely)._eval_evalf(prec)
| yn |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 10714,
"end": 13203
} | class ____:
key_validator = attrib(validator=optional(is_callable()))
value_validator = attrib(validator=optional(is_callable()))
mapping_validator = attrib(validator=optional(is_callable()))
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.mapping_validator is not None:
self.mapping_validator(inst, attr, value)
for key in value:
if self.key_validator is not None:
self.key_validator(inst, attr, key)
if self.value_validator is not None:
self.value_validator(inst, attr, value[key])
def __repr__(self):
return f"<deep_mapping validator for objects mapping {self.key_validator!r} to {self.value_validator!r}>"
def deep_mapping(
key_validator=None, value_validator=None, mapping_validator=None
):
"""
A validator that performs deep validation of a dictionary.
All validators are optional, but at least one of *key_validator* or
*value_validator* must be provided.
Args:
key_validator: Validator(s) to apply to dictionary keys.
value_validator: Validator(s) to apply to dictionary values.
mapping_validator:
Validator(s) to apply to top-level mapping attribute.
.. versionadded:: 19.1.0
.. versionchanged:: 25.4.0
*key_validator* and *value_validator* are now optional, but at least one
of them must be provided.
.. versionchanged:: 25.4.0
*key_validator*, *value_validator*, and *mapping_validator* can now be a
list or tuple of validators.
Raises:
TypeError: If any sub-validator fails on validation.
ValueError:
If neither *key_validator* nor *value_validator* is provided on
instantiation.
"""
if key_validator is None and value_validator is None:
msg = (
"At least one of key_validator or value_validator must be provided"
)
raise ValueError(msg)
if isinstance(key_validator, (list, tuple)):
key_validator = and_(*key_validator)
if isinstance(value_validator, (list, tuple)):
value_validator = and_(*value_validator)
if isinstance(mapping_validator, (list, tuple)):
mapping_validator = and_(*mapping_validator)
return _DeepMapping(key_validator, value_validator, mapping_validator)
@attrs(repr=False, frozen=True, slots=True)
| _DeepMapping |
python | skorch-dev__skorch | skorch/utils.py | {
"start": 22350,
"end": 23635
} | class ____(pickle.Unpickler):
"""
Subclass of pickle.Unpickler that intercepts 'torch.storage._load_from_bytes' calls
and uses `torch.load(..., map_location=..., torch_load_kwargs=...)`.
This way, we can use normal pickle when unpickling a skorch net but still benefit
from torch.load to handle the map_location. Note that `with torch.device(...)` does
not work for unpickling.
"""
def __init__(self, *args, map_location, torch_load_kwargs, **kwargs):
super().__init__(*args, **kwargs)
self.map_location = map_location
self.torch_load_kwargs = torch_load_kwargs
def find_class(self, module, name):
# The actual serialized data for PyTorch tensors references
# torch.storage._load_from_bytes internally. We intercept that call:
if (module == 'torch.storage') and (name == '_load_from_bytes'):
# Return a function that uses torch.load with our desired map_location
def _load_from_bytes(b):
return torch.load(
io.BytesIO(b),
map_location=self.map_location,
**self.torch_load_kwargs
)
return _load_from_bytes
return super().find_class(module, name)
| _TorchLoadUnpickler |
python | kamyu104__LeetCode-Solutions | Python/last-stone-weight-ii.py | {
"start": 33,
"end": 329
} | class ____(object):
def lastStoneWeightII(self, stones):
"""
:type stones: List[int]
:rtype: int
"""
dp = {0}
for stone in stones:
dp |= {stone+i for i in dp}
S = sum(stones)
return min(abs(i-(S-i)) for i in dp)
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadbconnector.py | {
"start": 2988,
"end": 3778
} | class ____(MySQLExecutionContext):
_lastrowid: Optional[int] = None
def create_server_side_cursor(self) -> DBAPICursor:
return self._dbapi_connection.cursor(buffered=False)
def create_default_cursor(self) -> DBAPICursor:
return self._dbapi_connection.cursor(buffered=True)
def post_exec(self) -> None:
super().post_exec()
self._rowcount = self.cursor.rowcount
if TYPE_CHECKING:
assert isinstance(self.compiled, SQLCompiler)
if self.isinsert and self.compiled.postfetch_lastrowid:
self._lastrowid = self.cursor.lastrowid
def get_lastrowid(self) -> int:
if TYPE_CHECKING:
assert self._lastrowid is not None
return self._lastrowid
| MySQLExecutionContext_mariadbconnector |
python | doocs__leetcode | solution/0300-0399/0325.Maximum Size Subarray Sum Equals k/Solution.py | {
"start": 0,
"end": 317
} | class ____:
def maxSubArrayLen(self, nums: List[int], k: int) -> int:
d = {0: -1}
ans = s = 0
for i, x in enumerate(nums):
s += x
if s - k in d:
ans = max(ans, i - d[s - k])
if s not in d:
d[s] = i
return ans
| Solution |
python | doocs__leetcode | lcp/LCP 39. 无人机方阵/Solution.py | {
"start": 0,
"end": 364
} | class ____:
def minimumSwitchingTimes(
self, source: List[List[int]], target: List[List[int]]
) -> int:
cnt = Counter()
for row in source:
for x in row:
cnt[x] += 1
for row in target:
for x in row:
cnt[x] -= 1
return sum(abs(x) for x in cnt.values()) // 2
| Solution |
python | django__django | tests/queries/tests.py | {
"start": 116747,
"end": 119649
} | class ____(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
{lunch, dinner},
)
def test_in_subquery(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Eaten.objects.filter(food__in=Food.objects.filter(name="apple"))),
{lunch},
)
self.assertEqual(
set(
Eaten.objects.filter(
food__in=Food.objects.filter(name="apple").values("eaten__meal")
)
),
set(),
)
self.assertEqual(
set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal="lunch"))),
{apple},
)
def test_nested_in_subquery(self):
extra = ExtraInfo.objects.create()
author = Author.objects.create(num=42, extra=extra)
report = Report.objects.create(creator=author)
comment = ReportComment.objects.create(report=report)
comments = ReportComment.objects.filter(
report__in=Report.objects.filter(
creator__in=extra.author_set.all(),
),
)
self.assertSequenceEqual(comments, [comment])
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])), {apple, pear}
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(set(Eaten.objects.filter(food=apple)), {lunch, dinner})
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(parent=node1)), [node2])
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(list(Node.objects.filter(node=node2)), [node1])
| ToFieldTests |
python | openai__openai-python | src/openai/types/responses/response_input_audio.py | {
"start": 415,
"end": 574
} | class ____(BaseModel):
input_audio: InputAudio
type: Literal["input_audio"]
"""The type of the input item. Always `input_audio`."""
| ResponseInputAudio |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/codeFlow4.py | {
"start": 692,
"end": 1989
} | class ____(Enum):
RED = 1
BLUE = 2
GREEN = 3
PERIWINKLE = 4
def func4(x: Color):
if x == Color.RED:
return
if x == Color.GREEN or (x == Color.PERIWINKLE and True):
y = 2
else:
if x == Color.BLUE:
y = 3
print(y)
def func5():
if True:
y = 2
print(y)
def func6():
if not None:
y = 2
print(y)
def func7(color: Color) -> str:
if color == Color.RED or color == Color.BLUE:
return "yes"
elif color == Color.GREEN or color == Color.PERIWINKLE:
return "no"
def func8(color: Color) -> bool:
if color == Color.RED or color == Color.BLUE:
return True
elif color == Color.GREEN or color == Color.PERIWINKLE:
return False
reveal_type(func8(Color.RED), expected_text="bool")
def func9(a: str | int, b: str | int) -> bool:
if isinstance(a, str):
return True
elif isinstance(a, int):
if isinstance(b, str):
return False
elif isinstance(b, int):
return False
def func10(foo: list[str]) -> bool:
i = 0
x: int | None = None
while i < 5:
foo[i]
if x is None:
return False
reveal_type(x, expected_text="Never")
i = x
return True
| Color |
python | mitsuhiko__rye | rye-devtools/src/rye_devtools/find_downloads.py | {
"start": 780,
"end": 946
} | class ____:
implementation: PythonImplementation
@abc.abstractmethod
async def find(self) -> list[PythonDownload]:
raise NotImplementedError
| Finder |
python | encode__django-rest-framework | rest_framework/generics.py | {
"start": 8447,
"end": 8987
} | class ____(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
GenericAPIView):
"""
Concrete view for retrieving, updating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
| RetrieveUpdateAPIView |
python | donnemartin__interactive-coding-challenges | graphs_trees/tree_bfs/test_bfs.py | {
"start": 18,
"end": 545
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBfs, self).__init__()
self.results = Results()
def test_bfs(self):
bst = BstBfs(Node(5))
bst.insert(2)
bst.insert(8)
bst.insert(1)
bst.insert(3)
bst.bfs(self.results.add_result)
self.assertEqual(str(self.results), '[5, 2, 8, 1, 3]')
print('Success: test_bfs')
def main():
test = TestBfs()
test.test_bfs()
if __name__ == '__main__':
main()
| TestBfs |
python | kamyu104__LeetCode-Solutions | Python/smallest-k-length-subsequence-with-occurrences-of-a-letter.py | {
"start": 29,
"end": 781
} | class ____(object):
def smallestSubsequence(self, s, k, letter, repetition):
"""
:type s: str
:type k: int
:type letter: str
:type repetition: int
:rtype: str
"""
stk = []
suffix = [0]*(len(s)+1)
for i in reversed(xrange(len(suffix)-1)):
suffix[i] = suffix[i+1]+(s[i] == letter)
for i, c in enumerate(s):
while stk and stk[-1] > c and len(stk)+(len(s)-i) > k and (stk[-1] != letter or repetition+1 <= suffix[i]):
repetition += (stk.pop() == letter)
if len(stk) < min(k-(repetition-(c == letter)), k):
repetition -= (c == letter)
stk.append(c)
return "".join(stk)
| Solution |
python | matplotlib__matplotlib | galleries/examples/user_interfaces/embedding_in_wx4_sgskip.py | {
"start": 2229,
"end": 2475
} | class ____(wx.App):
def OnInit(self):
"""Create the main window and insert the custom frame."""
frame = CanvasFrame()
frame.Show(True)
return True
if __name__ == "__main__":
app = App()
app.MainLoop()
| App |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 16037,
"end": 18308
class ____:
    """Tests for np.select: condition/choice selection semantics."""

    # Three choice arrays and three boolean masks. Element-wise, the masks
    # pick choices[1][1] == 5 and choices[2][2] == 9; other positions fall
    # back to the default.
    choices = [np.array([1, 2, 3]),
               np.array([4, 5, 6]),
               np.array([7, 8, 9])]
    conditions = [np.array([False, False, False]),
                  np.array([False, True, False]),
                  np.array([False, False, True])]

    def _select(self, cond, values, default=0):
        # Pure-Python reference implementation of np.select, used as the
        # expected value in test_basic.
        output = []
        for m in range(len(cond)):
            output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
        return output

    def test_basic(self):
        choices = self.choices
        conditions = self.conditions
        assert_array_equal(select(conditions, choices, default=15),
                           self._select(conditions, choices, default=15))
        # Sanity-check the fixtures themselves.
        assert_equal(len(choices), 3)
        assert_equal(len(conditions), 3)

    def test_broadcasting(self):
        # Conditions, choices, and the default all broadcast together.
        conditions = [np.array(True), np.array([False, True, False])]
        choices = [1, np.arange(12).reshape(4, 3)]
        assert_array_equal(select(conditions, choices), np.ones((4, 3)))
        # default can broadcast too:
        assert_equal(select([True], [0], default=[0]).shape, (1,))

    def test_return_dtype(self):
        # Result dtype follows promotion rules, including the scalar default.
        assert_equal(select(self.conditions, self.choices, 1j).dtype,
                     np.complex128)
        # But the conditions need to be stronger than the scalar default
        # if it is scalar.
        choices = [choice.astype(np.int8) for choice in self.choices]
        assert_equal(select(self.conditions, choices).dtype, np.int8)

        d = np.array([1, 2, 3, np.nan, 5, 7])
        m = np.isnan(d)
        assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])

    def test_non_bool_deprecation(self):
        # Non-boolean condition arrays must be rejected with TypeError,
        # for both signed and unsigned integer dtypes.
        choices = self.choices
        conditions = self.conditions[:]
        conditions[0] = conditions[0].astype(np.int_)
        assert_raises(TypeError, select, conditions, choices)
        conditions[0] = conditions[0].astype(np.uint8)
        assert_raises(TypeError, select, conditions, choices)
        assert_raises(TypeError, select, conditions, choices)

    def test_many_arguments(self):
        # This used to be limited by NPY_MAXARGS == 32
        conditions = [np.array([False])] * 100
        choices = [np.array([1])] * 100
        select(conditions, choices)
| TestSelect |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_comment08.py | {
"start": 315,
"end": 1154
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("comment08.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with comments."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.write_comment("A1", "Some text")
        worksheet.write_comment("A2", "Some text")
        # Per-cell visibility overrides the worksheet-level default that
        # show_comments() sets below.
        worksheet.write_comment("A3", "Some text", {"visible": False})
        worksheet.write_comment("A4", "Some text", {"visible": True})
        worksheet.write_comment("A5", "Some text")

        # Make comments visible by default and use a non-default author.
        worksheet.show_comments()
        worksheet.set_comments_author("John")

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/vendorsb/package.py | {
"start": 217,
"end": 589
class ____(Package):
    """A package that vendors another, and thus conflicts with it"""

    homepage = "http://www.example.com"
    url = "http://www.example.com/b-1.0.tar.gz"

    version("1.1", md5="0123456789abcdef0123456789abcdef")
    version("1.0", md5="0123456789abcdef0123456789abcdef")

    # pkg-b is not a dependency
    # Only the exact 1.1 release vendors pkg-b, so only it declares the
    # conflict (`@=1.1` matches that version precisely, not a range).
    conflicts("pkg-b", when="@=1.1")
| Vendorsb |
python | kubernetes-client__python | kubernetes/client/models/v1_host_path_volume_source.py | {
"start": 383,
"end": 4785
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'path': 'str',
        'type': 'str'
    }

    attribute_map = {
        'path': 'path',
        'type': 'type'
    }

    def __init__(self, path=None, type=None, local_vars_configuration=None):  # noqa: E501
        """V1HostPathVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._path = None
        self._type = None
        self.discriminator = None

        # `path` is required (its setter validates non-None when
        # client-side validation is enabled); `type` is optional.
        self.path = path
        if type is not None:
            self.type = type

    @property
    def path(self):
        """Gets the path of this V1HostPathVolumeSource.  # noqa: E501

        path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :return: The path of this V1HostPathVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path of this V1HostPathVolumeSource.

        path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :param path: The path of this V1HostPathVolumeSource.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and path is None:  # noqa: E501
            raise ValueError("Invalid value for `path`, must not be `None`")  # noqa: E501

        self._path = path

    @property
    def type(self):
        """Gets the type of this V1HostPathVolumeSource.  # noqa: E501

        type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :return: The type of this V1HostPathVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this V1HostPathVolumeSource.

        type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath  # noqa: E501

        :param type: The type of this V1HostPathVolumeSource.  # noqa: E501
        :type: str
        """

        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists, and dicts so the
        # returned structure contains only plain Python values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1HostPathVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1HostPathVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
| V1HostPathVolumeSource |
python | doocs__leetcode | solution/0100-0199/0188.Best Time to Buy and Sell Stock IV/Solution2.py | {
"start": 0,
"end": 489
class ____:
    def maxProfit(self, k: int, prices: List[int]) -> int:
        """Best Time to Buy and Sell Stock IV (at most k transactions).

        DP over (day i, transactions used j, holding?):
          f[i][j][0] = max profit after day i, <= j transactions, not holding
          f[i][j][1] = same, but currently holding one share
        A transaction is consumed when a share is bought.
        O(n*k) time, O(n*k) space.
        """
        n = len(prices)
        # Guard: no prices (or no allowed transactions) -> profit is 0.
        # Without this, empty `prices` crashes on prices[0] / f[-1].
        if n == 0 or k == 0:
            return 0
        f = [[[0] * 2 for _ in range(k + 1)] for _ in range(n)]
        for j in range(1, k + 1):
            f[0][j][1] = -prices[0]
        for i, x in enumerate(prices[1:], 1):
            for j in range(1, k + 1):
                # Sell today, or carry yesterday's non-holding state.
                f[i][j][0] = max(f[i - 1][j][1] + x, f[i - 1][j][0])
                # Buy today (uses one transaction), or keep holding.
                f[i][j][1] = max(f[i - 1][j - 1][0] - x, f[i - 1][j][1])
        return f[n - 1][k][0]
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area03.py | {
"start": 315,
"end": 1190
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("print_area03.xlsx")

        # Printer-settings binaries and their relationship files are not
        # byte-stable, so they are excluded from the comparison.
        self.ignore_files = [
            "xl/printerSettings/printerSettings1.bin",
            "xl/worksheets/_rels/sheet1.xml.rels",
        ]

        # Likewise ignore elements whose attributes vary between
        # environments (page margins / printer setup).
        self.ignore_elements = {
            "[Content_Types].xml": ['<Default Extension="bin"'],
            "xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
        }

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with a print area."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Print area spanning one full row: A1 through the last column XFD.
        worksheet.print_area("A1:XFD1")

        worksheet.write("A1", "Foo")

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 85734,
"end": 111436
} | class ____(NonStrictDataModel):
"""
:param id: Task id
:type id: str
:param name: Task Name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company ID
:type company: str
:param type: Type of task. Values: 'dataset_import', 'annotation', 'training',
'testing'
:type type: TaskTypeEnum
:param status:
:type status: TaskStatusEnum
:param comment: Free text comment
:type comment: str
:param created: Task creation time (UTC)
:type created: datetime.datetime
:param started: Task start time (UTC)
:type started: datetime.datetime
:param completed: Task end time (UTC)
:type completed: datetime.datetime
:param active_duration: Task duration time (seconds)
:type active_duration: int
:param parent: Parent task id
:type parent: str
:param project: Project ID of the project to which this task is assigned
:type project: str
:param input: Task input params
:type input: Input
:param output: Task output params
:type output: Output
:param execution: Task execution params
:type execution: Execution
:param container: Docker container parameters
:type container: dict
:param models: Task models
:type models: TaskModels
:param script: Script info
:type script: Script
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param status_changed: Last status change time
:type status_changed: datetime.datetime
:param status_message: free text string representing info about the status
:type status_message: str
:param status_reason: Reason for last status change
:type status_reason: str
:param published: Task publish time
:type published: datetime.datetime
:param last_worker: ID of last worker that handled the task
:type last_worker: str
:param last_worker_report: Last time a worker reported while working on this
task
:type last_worker_report: datetime.datetime
:param last_update: Last time this task was created, edited, changed or events
for this task were reported
:type last_update: datetime.datetime
:param last_change: Last time any update was done to the task
:type last_change: datetime.datetime
:param last_iteration: Last iteration reported for this task
:type last_iteration: int
:param last_metrics: Last metric variants (hash to events), one for each metric
hash
:type last_metrics: dict
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param runtime: Task runtime mapping
:type runtime: dict
"""
_schema = {
"properties": {
"active_duration": {
"description": "Task duration time (seconds)",
"type": ["integer", "null"],
},
"comment": {"description": "Free text comment", "type": ["string", "null"]},
"company": {"description": "Company ID", "type": ["string", "null"]},
"completed": {
"description": "Task end time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": ["object", "null"],
},
"container": {
"additionalProperties": {"type": ["string", "null"]},
"description": "Docker container parameters",
"type": ["object", "null"],
},
"created": {
"description": "Task creation time (UTC) ",
"format": "date-time",
"type": ["string", "null"],
},
"execution": {
"description": "Task execution params",
"oneOf": [{"$ref": "#/definitions/execution"}, {"type": "null"}],
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": ["object", "null"],
},
"id": {"description": "Task id", "type": ["string", "null"]},
"input": {
"description": "Task input params",
"oneOf": [{"$ref": "#/definitions/input"}, {"type": "null"}],
},
"last_change": {
"description": "Last time any update was done to the task",
"format": "date-time",
"type": ["string", "null"],
},
"last_iteration": {
"description": "Last iteration reported for this task",
"type": ["integer", "null"],
},
"last_metrics": {
"additionalProperties": {"$ref": "#/definitions/last_metrics_variants"},
"description": "Last metric variants (hash to events), one for each metric hash",
"type": ["object", "null"],
},
"last_update": {
"description": "Last time this task was created, edited, changed or events for this task were reported",
"format": "date-time",
"type": ["string", "null"],
},
"last_worker": {
"description": "ID of last worker that handled the task",
"type": ["string", "null"],
},
"last_worker_report": {
"description": "Last time a worker reported while working on this task",
"format": "date-time",
"type": ["string", "null"],
},
"models": {
"description": "Task models",
"oneOf": [{"$ref": "#/definitions/task_models"}, {"type": "null"}],
},
"name": {"description": "Task Name", "type": ["string", "null"]},
"output": {
"description": "Task output params",
"oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
},
"parent": {"description": "Parent task id", "type": ["string", "null"]},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": ["string", "null"],
},
"published": {
"description": "Task publish time",
"format": "date-time",
"type": ["string", "null"],
},
"runtime": {
"additionalProperties": True,
"description": "Task runtime mapping",
"type": ["object", "null"],
},
"script": {
"description": "Script info",
"oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
},
"started": {
"description": "Task start time (UTC)",
"format": "date-time",
"type": ["string", "null"],
},
"status": {
"description": "",
"oneOf": [{"$ref": "#/definitions/task_status_enum"}, {"type": "null"}],
},
"status_changed": {
"description": "Last status change time",
"format": "date-time",
"type": ["string", "null"],
},
"status_message": {
"description": "free text string representing info about the status",
"type": ["string", "null"],
},
"status_reason": {
"description": "Reason for last status change",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": ["array", "null"],
},
"type": {
"description": "Type of task. Values: 'dataset_import', 'annotation', 'training', 'testing'",
"oneOf": [{"$ref": "#/definitions/task_type_enum"}, {"type": "null"}],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id=None,
name=None,
user=None,
company=None,
type=None,
status=None,
comment=None,
created=None,
started=None,
completed=None,
active_duration=None,
parent=None,
project=None,
input=None,
output=None,
execution=None,
container=None,
models=None,
script=None,
tags=None,
system_tags=None,
status_changed=None,
status_message=None,
status_reason=None,
published=None,
last_worker=None,
last_worker_report=None,
last_update=None,
last_change=None,
last_iteration=None,
last_metrics=None,
hyperparams=None,
configuration=None,
runtime=None,
**kwargs
):
super(Task, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.type = type
self.status = status
self.comment = comment
self.created = created
self.started = started
self.completed = completed
self.active_duration = active_duration
self.parent = parent
self.project = project
self.input = input
self.output = output
self.execution = execution
self.container = container
self.models = models
self.script = script
self.tags = tags
self.system_tags = system_tags
self.status_changed = status_changed
self.status_message = status_message
self.status_reason = status_reason
self.published = published
self.last_worker = last_worker
self.last_worker_report = last_worker_report
self.last_update = last_update
self.last_change = last_change
self.last_iteration = last_iteration
self.last_metrics = last_metrics
self.hyperparams = hyperparams
self.configuration = configuration
self.runtime = runtime
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self):
return self._property_user
@user.setter
def user(self, value):
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self):
return self._property_company
@company.setter
def company(self, value):
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("status")
def status(self):
return self._property_status
@status.setter
def status(self, value):
if value is None:
self._property_status = None
return
if isinstance(value, six.string_types):
try:
value = TaskStatusEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "status", enum.Enum)
self._property_status = value
@schema_property("comment")
def comment(self):
return self._property_comment
@comment.setter
def comment(self, value):
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("created")
def created(self):
return self._property_created
@created.setter
def created(self, value):
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("started")
def started(self):
return self._property_started
@started.setter
def started(self, value):
if value is None:
self._property_started = None
return
self.assert_isinstance(value, "started", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_started = value
@schema_property("completed")
def completed(self):
return self._property_completed
@completed.setter
def completed(self, value):
if value is None:
self._property_completed = None
return
self.assert_isinstance(value, "completed", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_completed = value
@schema_property("active_duration")
def active_duration(self):
return self._property_active_duration
@active_duration.setter
def active_duration(self, value):
if value is None:
self._property_active_duration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "active_duration", six.integer_types)
self._property_active_duration = value
@schema_property("parent")
def parent(self):
return self._property_parent
@parent.setter
def parent(self, value):
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("input")
def input(self):
return self._property_input
@input.setter
def input(self, value):
if value is None:
self._property_input = None
return
if isinstance(value, dict):
value = Input.from_dict(value)
else:
self.assert_isinstance(value, "input", Input)
self._property_input = value
@schema_property("output")
def output(self):
return self._property_output
@output.setter
def output(self, value):
if value is None:
self._property_output = None
return
if isinstance(value, dict):
value = Output.from_dict(value)
else:
self.assert_isinstance(value, "output", Output)
self._property_output = value
@schema_property("execution")
def execution(self):
return self._property_execution
@execution.setter
def execution(self, value):
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("container")
def container(self):
return self._property_container
@container.setter
def container(self, value):
if value is None:
self._property_container = None
return
self.assert_isinstance(value, "container", (dict,))
self._property_container = value
@schema_property("models")
def models(self):
return self._property_models
@models.setter
def models(self, value):
if value is None:
self._property_models = None
return
if isinstance(value, dict):
value = TaskModels.from_dict(value)
else:
self.assert_isinstance(value, "models", TaskModels)
self._property_models = value
@schema_property("script")
def script(self):
return self._property_script
@script.setter
def script(self, value):
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("status_changed")
def status_changed(self):
return self._property_status_changed
@status_changed.setter
def status_changed(self, value):
if value is None:
self._property_status_changed = None
return
self.assert_isinstance(value, "status_changed", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_status_changed = value
@schema_property("status_message")
def status_message(self):
return self._property_status_message
@status_message.setter
def status_message(self, value):
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("status_reason")
def status_reason(self):
return self._property_status_reason
@status_reason.setter
def status_reason(self, value):
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("published")
def published(self):
return self._property_published
@published.setter
def published(self, value):
if value is None:
self._property_published = None
return
self.assert_isinstance(value, "published", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_published = value
@schema_property("last_worker")
def last_worker(self):
return self._property_last_worker
@last_worker.setter
def last_worker(self, value):
if value is None:
self._property_last_worker = None
return
self.assert_isinstance(value, "last_worker", six.string_types)
self._property_last_worker = value
@schema_property("last_worker_report")
def last_worker_report(self):
return self._property_last_worker_report
@last_worker_report.setter
def last_worker_report(self, value):
if value is None:
self._property_last_worker_report = None
return
self.assert_isinstance(
value, "last_worker_report", six.string_types + (datetime,)
)
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_worker_report = value
@schema_property("last_update")
def last_update(self):
return self._property_last_update
@last_update.setter
def last_update(self, value):
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
@schema_property("last_change")
def last_change(self):
return self._property_last_change
@last_change.setter
def last_change(self, value):
if value is None:
self._property_last_change = None
return
self.assert_isinstance(value, "last_change", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_change = value
@schema_property("last_iteration")
def last_iteration(self):
return self._property_last_iteration
@last_iteration.setter
def last_iteration(self, value):
if value is None:
self._property_last_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "last_iteration", six.integer_types)
self._property_last_iteration = value
@schema_property("last_metrics")
def last_metrics(self):
return self._property_last_metrics
@last_metrics.setter
def last_metrics(self, value):
if value is None:
self._property_last_metrics = None
return
self.assert_isinstance(value, "last_metrics", (dict,))
self._property_last_metrics = value
@schema_property("hyperparams")
def hyperparams(self):
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value):
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(
value.keys(), "hyperparams_keys", six.string_types, is_array=True
)
self.assert_isinstance(
value.values(), "hyperparams_values", (SectionParams, dict), is_array=True
)
value = dict(
(k, SectionParams(**v) if isinstance(v, dict) else v)
for k, v in value.items()
)
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self):
return self._property_configuration
@configuration.setter
def configuration(self, value):
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(
value.keys(), "configuration_keys", six.string_types, is_array=True
)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(
(k, ConfigurationItem(**v) if isinstance(v, dict) else v)
for k, v in value.items()
)
self._property_configuration = value
@schema_property("runtime")
def runtime(self):
return self._property_runtime
@runtime.setter
def runtime(self, value):
if value is None:
self._property_runtime = None
return
self.assert_isinstance(value, "runtime", (dict,))
self._property_runtime = value
| Task |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.