language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | facelessuser__pymdown-extensions | tests/test_extensions/test_magiclink.py | {
"start": 2372,
"end": 3287
} | class ____(util.MdCase):
"""Test cases for repo link shortening."""
extension = [
'pymdownx.magiclink',
]
extension_configs = {
'pymdownx.magiclink': {
'repo_url_shorthand': True,
'user': 'facelessuser',
'repo': 'pymdown-extensions'
}
}
def test_discuss(self):
"""Test discuss."""
self.check_markdown(
r'?1173',
r'<p><a class="magiclink magiclink-github magiclink-discussion" href="https://github.com/facelessuser/pymdown-extensions/discussions/1173" title="GitHub Discussion: facelessuser/pymdown-extensions #1173">?1173</a></p>' # noqa: E501
)
def test_bad_discss(self):
"""Test repo that doesn't support discussions."""
self.check_markdown(
r'gitlab:user/repo?1173',
r'<p>gitlab:user/repo?1173</p>'
)
| TestMagicLinkShorthand |
python | google__jax | jax/_src/named_sharding.py | {
"start": 11013,
"end": 11800
} | class ____:
axes: Sequence[str]
is_open: bool
def build(self) -> sdy.DimensionShardingAttr:
return sdy.DimensionShardingAttr.get(
[sdy.AxisRefAttr.get(axis) for axis in self.axes],
is_closed=not self.is_open)
def __repr__(self):
return f'SdyDim({self._custom_repr()})'
def _custom_repr(self):
axes_repr = ', '.join(f"'{a}'" for a in self.axes)
open_repr = ''
if self.is_open:
open_repr = ', ?' if self.axes else '?'
return f'{{{axes_repr}{open_repr}}}'
def _get_axes(axes, mesh_shape):
if not axes:
return ()
assert mesh_shape is not None
# Sort wrt mesh axis names so order is deterministic and doesn't hang in
# McJAX.
return tuple(n for n, _ in mesh_shape if n in axes)
@dataclasses.dataclass(kw_only=True)
| SdyDim |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-tavily-research/llama_index/tools/tavily_research/base.py | {
"start": 170,
"end": 2732
} | class ____(BaseToolSpec):
"""Tavily tool spec."""
spec_functions = [
"search",
"extract",
]
def __init__(self, api_key: str) -> None:
"""Initialize with parameters."""
from tavily import TavilyClient
self.client = TavilyClient(api_key=api_key)
def search(self, query: str, max_results: Optional[int] = 6) -> List[Document]:
"""
Run query through Tavily Search and return metadata.
Args:
query: The query to search for.
max_results: The maximum number of results to return.
Returns:
results: A list of dictionaries containing the results:
url: The url of the result.
content: The content of the result.
"""
response = self.client.search(
query, max_results=max_results, search_depth="advanced"
)
return [
Document(text=result["content"], extra_info={"url": result["url"]})
for result in response["results"]
]
def extract(
self,
urls: List[str],
include_images: bool = False,
include_favicon: bool = False,
extract_depth: str = "basic",
format: str = "markdown",
) -> List[Document]:
"""
Extract raw content from a URL using Tavily Extract API.
Args:
urls: The URL(s) to extract content from.
include_images: Whether to include images in the response.
include_favicon: Whether to include the favicon in the response.
extract_depth: 'basic' or 'advanced' (default: 'basic').
format: 'markdown' or 'text' (default: 'markdown').
Returns:
A list of Document objects containing the extracted content and metadata,
or an empty list if no results were returned.
"""
response = self.client.extract(
urls,
include_images=include_images,
include_favicon=include_favicon,
extract_depth=extract_depth,
format=format,
)
results = response.get("results", [])
if not results:
return []
return [
Document(
text=result.get("raw_content", ""),
extra_info={
"url": result.get("url"),
"favicon": result.get("favicon"),
"images": result.get("images"),
},
)
for result in results
]
| TavilyToolSpec |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 12257,
"end": 12594
} | class ____(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
| LinalgNonsquareTestCase |
python | tiangolo__fastapi | docs_src/query_param_models/tutorial002_py310.py | {
"start": 120,
"end": 462
} | class ____(BaseModel):
model_config = {"extra": "forbid"}
limit: int = Field(100, gt=0, le=100)
offset: int = Field(0, ge=0)
order_by: Literal["created_at", "updated_at"] = "created_at"
tags: list[str] = []
@app.get("/items/")
async def read_items(filter_query: FilterParams = Query()):
return filter_query
| FilterParams |
python | viewflow__viewflow | tests/json/test_json__char.py | {
"start": 302,
"end": 1123
} | class ____(TestCase):
def test_crud(self):
model = CharFieldModel(char_field='test')
self.assertIsInstance(
model._meta.get_field('char_field'),
models.CharField
)
self.assertEqual(model.data, {
'char_field': 'test',
# 'required_char_field': '',
})
# TODO: Add validation for required fields. Should raise an error on save
model.save()
model = CharFieldModel.objects.get()
self.assertEqual(model.data, {
'char_field': 'test',
# 'required_char_field': '',
})
self.assertEqual(model.char_field, 'test')
def test_null_value(self):
model = CharFieldModel()
self.assertEqual(model.char_field, None)
self.assertEqual(model.data, {})
| Test |
python | ray-project__ray | rllib/algorithms/sac/tests/test_sac.py | {
"start": 597,
"end": 1481
} | class ____(gym.Env):
def __init__(self, config):
if config.get("simplex_actions", False):
self.action_space = Simplex((2,))
else:
self.action_space = Box(0.0, 1.0, (1,))
self.observation_space = Box(0.0, 1.0, (1,))
self.max_steps = config.get("max_steps", 100)
self.state = None
self.steps = None
def reset(self, *, seed=None, options=None):
self.state = self.observation_space.sample()
self.steps = 0
return self.state, {}
def step(self, action):
self.steps += 1
# Reward is 1.0 - (max(actions) - state).
[rew] = 1.0 - np.abs(np.max(action) - self.state)
terminated = False
truncated = self.steps >= self.max_steps
self.state = self.observation_space.sample()
return self.state, rew, terminated, truncated, {}
| SimpleEnv |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_schemas.py | {
"start": 2045,
"end": 2979
} | class ____:
"""Type constraint for a parameter.
Attributes:
name: Name of the parameter. E.g. "TFloat"
allowed_types: Allowed types for the parameter.
"""
name: str
allowed_types: set[ir.TypeProtocol]
description: str = ""
def __hash__(self) -> int:
return hash((self.name, tuple(self.allowed_types)))
def __str__(self) -> str:
allowed_types_str = " | ".join(str(t) for t in self.allowed_types)
return f"{self.name}={allowed_types_str}"
@classmethod
def any_tensor(cls, name: str, description: str = "") -> TypeConstraintParam:
return cls(name, {ir.TensorType(dtype) for dtype in ir.DataType}, description)
@classmethod
def any_value(cls, name: str, description: str = "") -> TypeConstraintParam:
return cls(name, _ALL_VALUE_TYPES, description) # type: ignore[arg-type]
@dataclasses.dataclass(frozen=True)
| TypeConstraintParam |
python | google__jax | jax/_src/pallas/mosaic/core.py | {
"start": 8712,
"end": 12520
} | class ____:
"""A mesh of TensorCores."""
devices: np.ndarray
axis_names: Sequence[str]
def __init__(self, devices: np.ndarray, axis_names: Sequence[str]):
devices = np.copy(devices)
devices.setflags(write=False)
object.__setattr__(self, "devices", devices)
object.__setattr__(self, "axis_names", tuple(axis_names))
def __hash__(self) -> int:
return hash(
(self.devices.shape, tuple(np.ravel(self.devices)), self.axis_names)
)
@property
def backend(self) -> str:
return "mosaic_tpu"
@property
def shape(self):
return collections.OrderedDict(zip(self.axis_names, self.devices.shape))
def discharges_effect(self, effect: jax_core.Effect):
del effect
return False
def create_tensorcore_mesh(
axis_name: str,
devices: Sequence[jax.Device] | None = None,
num_cores: int | None = None,
) -> TensorCoreMesh:
if devices is not None and num_cores is not None:
raise ValueError('cannot specify both devices and num_cores')
if num_cores is None:
if devices is None:
abstract_device = jax.sharding.get_abstract_mesh().abstract_device
if abstract_device is None:
devices = [jax.devices()[0]]
else:
devices = [abstract_device]
num_cores = devices[0].num_cores
return TensorCoreMesh(
np.array([TensorCore(i) for i in range(num_cores)]),
[axis_name],
)
def _tensorcore_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh,
jaxpr,
compiler_params: Any | None,
interpret: Any,
debug: bool,
cost_estimate: pallas_core.CostEstimate | None,
name: str,
metadata: FrozenDict[str, str] | None,
):
assert isinstance(mesh, TensorCoreMesh)
if compiler_params and not isinstance(compiler_params, CompilerParams):
raise ValueError(
"compiler_params must be a pltpu.CompilerParams"
)
if not compiler_params:
compiler_params = CompilerParams()
if len(mesh.shape) > 1:
raise NotImplementedError("Mesh must be 1D")
if compiler_params.dimension_semantics is not None:
raise ValueError(
"dimension_semantics must be None for TensorCoreMesh"
)
num_cores = len(mesh.devices)
if num_cores > 1:
# Since each core will have its own VMEM, we currently disallow VMEM inputs
# and outputs since other ops might not agree on how they are sharded across
# cores by the (core-mapped) kernel.
if any(
pallas_core.get_memory_space_aval(aval) == MemorySpace.VMEM
for aval in in_avals
):
raise NotImplementedError(
"TensorCoreMesh does not support VMEM inputs/outputs when there are"
" >1 cores. Use HBM or ANY instead."
)
return pallas_core.default_mesh_discharge_rule(
in_avals,
out_avals,
*args,
jaxpr=jaxpr,
mesh=mesh,
compiler_params=compiler_params.replace(
dimension_semantics=(PARALLEL,)
),
debug=debug,
interpret=interpret,
cost_estimate=cost_estimate,
name=name,
metadata=metadata,
scratch_shapes=[],
)
pallas_core._core_map_mesh_rules[TensorCoreMesh] = (
_tensorcore_mesh_discharge_rule
)
def _convert_semaphore_type_to_aval(
out_shape: SemaphoreType,
) -> jax_core.AbstractValue:
return out_shape.get_array_aval()
pallas_core._out_shape_to_aval_mapping[SemaphoreType] = (
_convert_semaphore_type_to_aval
)
def get_device_kind() -> str:
if abstract_device := jax.sharding.get_abstract_mesh().abstract_device:
return abstract_device.device_kind
return jex_backend.get_default_device().device_kind
def get_num_device_cores() -> int:
if abstract_device := jax.sharding.get_abstract_mesh().abstract_device:
return abstract_device.num_cores
return jex_backend.get_default_device().num_cores
| TensorCoreMesh |
python | huggingface__transformers | src/transformers/models/pvt/modeling_pvt.py | {
"start": 20447,
"end": 22611
} | class ____(PvtPreTrainedModel):
def __init__(self, config: PvtConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.pvt = PvtModel(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor],
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.pvt(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output[:, 0, :])
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["PvtForImageClassification", "PvtModel", "PvtPreTrainedModel"]
| PvtForImageClassification |
python | FactoryBoy__factory_boy | tests/djapp/models.py | {
"start": 1116,
"end": 1161
} | class ____(StandardModel):
pass
| StandardSon |
python | django__django | django/contrib/postgres/fields/ranges.py | {
"start": 10719,
"end": 10876
} | class ____(models.Transform):
lookup_name = "isempty"
function = "isempty"
output_field = models.BooleanField()
@RangeField.register_lookup
| IsEmpty |
python | coleifer__peewee | tests/migrations.py | {
"start": 798,
"end": 912
} | class ____(TestModel):
first_name = CharField()
last_name = CharField()
dob = DateField(null=True)
| Person |
python | pyca__cryptography | tests/hazmat/primitives/test_pkcs7.py | {
"start": 41195,
"end": 51646
} | class ____:
@pytest.fixture(name="data")
def fixture_data(self, backend) -> bytes:
return b"Hello world!\n"
@pytest.fixture(name="certificate")
def fixture_certificate(self, backend) -> x509.Certificate:
certificate, _ = _load_rsa_cert_key()
return certificate
@pytest.fixture(name="private_key")
def fixture_private_key(self, backend) -> rsa.RSAPrivateKey:
_, private_key = _load_rsa_cert_key()
return private_key
def test_unsupported_certificate_encryption(self, backend, private_key):
cert_non_rsa, _ = _load_cert_key()
with pytest.raises(TypeError):
pkcs7.pkcs7_decrypt_der(b"", cert_non_rsa, private_key, [])
def test_not_a_cert(self, backend, private_key):
with pytest.raises(TypeError):
pkcs7.pkcs7_decrypt_der(b"", b"wrong_type", private_key, []) # type: ignore[arg-type]
def test_not_a_pkey(self, backend, certificate):
with pytest.raises(TypeError):
pkcs7.pkcs7_decrypt_der(b"", certificate, b"wrong_type", []) # type: ignore[arg-type]
@pytest.mark.parametrize(
"invalid_options",
[
[b"invalid"],
[pkcs7.PKCS7Options.NoAttributes],
[pkcs7.PKCS7Options.Binary],
],
)
def test_pkcs7_decrypt_invalid_options(
self, backend, invalid_options, data, certificate, private_key
):
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(
data, certificate, private_key, invalid_options
)
@pytest.mark.parametrize("options", [[], [pkcs7.PKCS7Options.Text]])
def test_pkcs7_decrypt_der(
self, backend, data, certificate, private_key, options
):
# Encryption
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.DER, options)
# Test decryption: new lines are canonicalized to '\r\n' when
# encryption has no Binary option
decrypted = pkcs7.pkcs7_decrypt_der(
enveloped, certificate, private_key, options
)
assert decrypted == data.replace(b"\n", b"\r\n")
def test_pkcs7_decrypt_aes_256_cbc_encrypted_content(
self, backend, data, certificate, private_key
):
# Encryption
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.set_content_encryption_algorithm(algorithms.AES256)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.PEM, [])
# Test decryption: new lines are canonicalized to '\r\n' when
# encryption has no Binary option
decrypted = pkcs7.pkcs7_decrypt_pem(
enveloped, certificate, private_key, []
)
assert decrypted == data.replace(b"\n", b"\r\n")
@pytest.mark.parametrize(
"header",
[
"content-type: text/plain",
"CONTENT-TYPE: text/plain",
"MIME-Version: 1.0\r\nContent-Type: text/plain; charset='UTF-8'"
"\r\nContent-Transfer-Encoding: 7bit\r\nFrom: sender@example.com"
"\r\nTo: recipient@example.com\r\nSubject: Test Email",
],
)
def test_pkcs7_decrypt_der_text_handmade_header(
self, backend, certificate, private_key, header
):
# Encryption of data with a custom header
base_data = "Hello world!\r\n"
data = f"{header}\r\n\r\n{base_data}".encode()
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(
serialization.Encoding.DER, [pkcs7.PKCS7Options.Binary]
)
# Test decryption with text option
decrypted = pkcs7.pkcs7_decrypt_der(
enveloped, certificate, private_key, [pkcs7.PKCS7Options.Text]
)
assert decrypted == base_data.encode()
@pytest.mark.parametrize("options", [[], [pkcs7.PKCS7Options.Text]])
def test_pkcs7_decrypt_pem(
self, backend, data, certificate, private_key, options
):
# Encryption
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.PEM, options)
# Test decryption: new lines are canonicalized to '\r\n' when
# encryption has no Binary option
decrypted = pkcs7.pkcs7_decrypt_pem(
enveloped, certificate, private_key, options
)
assert decrypted == data.replace(b"\n", b"\r\n")
def test_pkcs7_decrypt_pem_with_wrong_tag(
self, backend, data, certificate, private_key
):
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_pem(
certificate.public_bytes(serialization.Encoding.PEM),
certificate,
private_key,
[],
)
@pytest.mark.parametrize("options", [[], [pkcs7.PKCS7Options.Text]])
def test_pkcs7_decrypt_smime(
self, backend, data, certificate, private_key, options
):
# Encryption
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.SMIME, options)
# Test decryption
decrypted = pkcs7.pkcs7_decrypt_smime(
enveloped, certificate, private_key, options
)
assert decrypted == data.replace(b"\n", b"\r\n")
def test_pkcs7_decrypt_no_encrypted_content(
self, backend, data, certificate, private_key
):
enveloped = load_vectors_from_file(
os.path.join("pkcs7", "enveloped-no-content.der"),
loader=lambda pemfile: pemfile.read(),
mode="rb",
)
# Test decryption with text option
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(enveloped, certificate, private_key, [])
def test_pkcs7_decrypt_text_no_header(
self, backend, data, certificate, private_key
):
# Encryption of data without a header (no "Text" option)
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.DER, [])
# Test decryption with text option
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(
enveloped, certificate, private_key, [pkcs7.PKCS7Options.Text]
)
def test_pkcs7_decrypt_text_html_content_type(
self, backend, certificate, private_key
):
# Encryption of data with a text/html content type header
data = b"Content-Type: text/html\r\n\r\nHello world!<br>"
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(
serialization.Encoding.DER, [pkcs7.PKCS7Options.Binary]
)
# Test decryption with text option
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(
enveloped, certificate, private_key, [pkcs7.PKCS7Options.Text]
)
def test_smime_decrypt_no_recipient_match(
self, backend, data, certificate, rsa_key_2048: rsa.RSAPrivateKey
):
# Encrypt some data with one RSA chain
builder = (
pkcs7.PKCS7EnvelopeBuilder()
.set_data(data)
.add_recipient(certificate)
)
enveloped = builder.encrypt(serialization.Encoding.DER, [])
# Prepare another RSA chain
another_private_key = RSA_KEY_2048_ALT.private_key(
unsafe_skip_rsa_key_validation=True
)
_, another_cert = _generate_ca_and_leaf(
rsa_key_2048, another_private_key
)
# Test decryption with another RSA chain
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(
enveloped, another_cert, another_private_key, []
)
def test_smime_decrypt_unsupported_key_encryption_algorithm(
self, backend, data, certificate, private_key
):
enveloped = load_vectors_from_file(
os.path.join("pkcs7", "enveloped-rsa-oaep.pem"),
loader=lambda pemfile: pemfile.read(),
mode="rb",
)
with pytest.raises(exceptions.UnsupportedAlgorithm):
pkcs7.pkcs7_decrypt_pem(enveloped, certificate, private_key, [])
def test_smime_decrypt_unsupported_content_encryption_algorithm(
self, backend, data, certificate, private_key
):
enveloped = load_vectors_from_file(
os.path.join("pkcs7", "enveloped-triple-des.pem"),
loader=lambda pemfile: pemfile.read(),
mode="rb",
)
with pytest.raises(exceptions.UnsupportedAlgorithm):
pkcs7.pkcs7_decrypt_pem(enveloped, certificate, private_key, [])
def test_smime_decrypt_not_enveloped(
self, backend, data, certificate, private_key
):
# Create a signed email
cert, key = _load_cert_key()
options = [pkcs7.PKCS7Options.DetachedSignature]
builder = (
pkcs7.PKCS7SignatureBuilder()
.set_data(data)
.add_signer(cert, key, hashes.SHA256())
)
signed = builder.sign(serialization.Encoding.DER, options)
# Test decryption failure with signed email
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_der(signed, certificate, private_key, [])
def test_smime_decrypt_smime_not_encrypted(
self, backend, certificate, private_key
):
# Create a plain email
email_message = EmailMessage()
email_message.set_content("Hello world!")
# Test decryption failure with plain email
with pytest.raises(ValueError):
pkcs7.pkcs7_decrypt_smime(
email_message.as_bytes(), certificate, private_key, []
)
@pytest.mark.supported(
only_if=lambda backend: backend.pkcs7_supported(),
skip_message="Requires OpenSSL with PKCS7 support",
)
| TestPKCS7Decrypt |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 15767,
"end": 16301
} | class ____(graphene.ObjectType, AssetEventMixin):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneDisplayableEvent)
name = "ObservationEvent"
def __init__(self, event):
observation = event.dagster_event.asset_observation_data.asset_observation
super().__init__(**_construct_asset_event_metadata_params(event, observation))
AssetEventMixin.__init__(
self,
event=event,
metadata=observation,
)
| GrapheneObservationEvent |
python | jazzband__django-oauth-toolkit | tests/test_authorization_code.py | {
"start": 26701,
"end": 28637
} | class ____(BaseTest):
def get_auth(self, scope="read write"):
"""
Helper method to retrieve a valid authorization code
"""
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": scope,
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
return query_dict["code"].pop()
def generate_pkce_codes(self, algorithm, length=43):
"""
Helper method to generate pkce codes
"""
code_verifier = get_random_string(length)
if algorithm == "S256":
code_challenge = (
base64.urlsafe_b64encode(hashlib.sha256(code_verifier.encode()).digest()).decode().rstrip("=")
)
else:
code_challenge = code_verifier
return code_verifier, code_challenge
def get_pkce_auth(self, code_challenge, code_challenge_method):
"""
Helper method to retrieve a valid authorization code using pkce
"""
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
"code_challenge": code_challenge,
"code_challenge_method": code_challenge_method,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
return query_dict["code"].pop()
@pytest.mark.oauth2_settings(presets.DEFAULT_SCOPES_RW)
| BaseAuthorizationCodeTokenView |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/xent_op_d9m_test.py | {
"start": 1464,
"end": 2830
} | class ____(test.TestCase):
"""Test d9m-unimplemented exceptions from SoftmaxXentWithLogitsOp.
Test that tf.errors.UnimplementedError is thrown, as appropriate, by the GPU
code-paths through SoftmaxXentWithLogitsOp when deterministic ops are
enabled.
This test assumes that xent_op_test.py runs equivalent test cases when
deterministic ops are not enabled and will therefore detect erroneous
exception throwing in those cases.
"""
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testExceptionThrowing(self):
with self.session(), test_util.force_gpu():
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
features = constant_op.constant([[0.3, 0.5], [0.5, 0.6]], dtype=dtype)
labels = constant_op.constant([[0.2, 0.4], [0.1, 0.2]], dtype=dtype)
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"The GPU implementation of SoftmaxCrossEntropyWithLogits that " +
"would have been executed is not deterministic. Note that the " +
"Python API uses an alternative, deterministic, GPU-accelerated " +
"path when determinism is enabled."):
result = gen_nn_ops.softmax_cross_entropy_with_logits(
features=features, labels=labels)
self.evaluate(result)
| XentOpDeterminismExceptionsTest |
python | mitmproxy__pdoc | test/testdata/misc.py | {
"start": 2966,
"end": 3949
} | class ____:
def foo(self):
"""no indents"""
def bar(self):
"""no
indents"""
def baz(self):
"""one
indent"""
def qux(self):
"""
two
indents
"""
@lru_cache()
def foo_decorated(self):
"""no indents"""
@lru_cache()
# comment
def foo_commented(self):
"""no indents"""
@lru_cache()
def bar_decorated(self):
"""no
indents"""
@lru_cache()
def baz_decorated(self):
"""one
indent"""
@lru_cache()
def qux_decorated(self):
"""
two
indents
"""
@lru_cache(
maxsize=42
)
def quux_decorated(self):
"""multi-line decorator, https://github.com/mitmproxy/pdoc/issues/246"""
def _protected_decorator(f):
return f
@_protected_decorator
def fun_with_protected_decorator():
"""This function has a protected decorator (name starting with a single `_`)."""
| Indented |
python | facebook__pyre-check | tools/typeshed_patcher/buck.py | {
"start": 466,
"end": 739
} | class ____:
mapping: Mapping[pathlib.Path, pathlib.Path] = dataclasses.field(
default_factory=dict
)
def to_string(self) -> str:
return json.dumps({str(k): str(v) for k, v in self.mapping.items()})
@dataclasses.dataclass(frozen=True)
| MappedSource |
python | getsentry__sentry | src/sentry/integrations/discord/webhooks/message_component.py | {
"start": 2468,
"end": 11690
} | class ____(DiscordInteractionHandler):
"""
Handles logic for Discord Message Component interactions.
Request passed in constructor must be a Message Component interaction.
"""
def __init__(self, request: DiscordRequest) -> None:
super().__init__(request)
self.custom_id: str = request.get_component_custom_id()
self.user: RpcUser
# Everything after the colon is the group id in a custom_id
custom_id_parts = self.custom_id.split(":")
self.group_id: str = custom_id_parts[1] if len(custom_id_parts) > 1 else ""
@cached_property
def group(self) -> Group | None:
try:
group_id = int(self.group_id)
return Group.objects.filter(id=group_id).first()
except Exception:
return None
def handle(self) -> Response:
logging_data = self.request.logging_data
if self.request.user is None:
logger.warning("discord.interaction.component.not_linked", extra={**logging_data})
return self.send_message(NO_IDENTITY)
self.user = self.request.user
if (not self.group_id) or (not self.group):
return self.send_message(INVALID_GROUP_ID)
if not self.group.organization.has_access(self.user):
logger.warning(
"discord.interaction.component.not_in_org",
extra={"org_slug": self.group.organization.slug, **logging_data},
)
return self.send_message(NOT_IN_ORG)
def record_event(interaction_type: MessagingInteractionType) -> MessagingInteractionEvent:
return MessagingInteractionEvent(
interaction_type,
DiscordMessagingSpec(),
user=self.user,
organization=(self.group.organization if self.group else None),
)
if self.custom_id.startswith(CustomIds.ASSIGN_DIALOG):
logger.info("discord.interaction.component.assign_dialog", extra={**logging_data})
with record_event(MessagingInteractionType.ASSIGN_DIALOG).capture():
return self.assign_dialog()
elif self.custom_id.startswith(CustomIds.ASSIGN):
logger.info(
"discord.interaction.component.assign",
extra={**logging_data, "assign_to": self.request.get_selected_options()[0]},
)
with record_event(MessagingInteractionType.ASSIGN).capture():
return self.assign()
elif self.custom_id.startswith(CustomIds.RESOLVE_DIALOG):
logger.info("discord.interaction.component.resolve_dialog", extra={**logging_data})
with record_event(MessagingInteractionType.RESOLVE_DIALOG).capture():
return self.resolve_dialog()
elif self.custom_id.startswith(CustomIds.RESOLVE):
logger.info("discord.interaction.component.resolve", extra={**logging_data})
with record_event(MessagingInteractionType.RESOLVE).capture():
return self.resolve()
elif self.custom_id.startswith(CustomIds.UNRESOLVE):
logger.info("discord.interaction.component.unresolve", extra={**logging_data})
with record_event(MessagingInteractionType.UNRESOLVE).capture():
return self.unresolve()
elif self.custom_id.startswith(CustomIds.MARK_ONGOING):
logger.info("discord.interaction.component.mark_ongoing", extra={**logging_data})
with record_event(MessagingInteractionType.MARK_ONGOING).capture():
return self.unresolve(from_mark_ongoing=True)
elif self.custom_id.startswith(CustomIds.ARCHIVE):
logger.info("discord.interaction.component.archive", extra={**logging_data})
with record_event(MessagingInteractionType.ARCHIVE).capture():
return self.archive()
logger.warning("discord.interaction.component.unknown_custom_id", extra={**logging_data})
return self.send_message(INVALID_GROUP_ID)
def assign_dialog(self) -> Response:
if (not self.group_id) or (not self.group):
return self.send_message(INVALID_GROUP_ID)
assign_selector = DiscordSelectMenu(
custom_id=f"{CustomIds.ASSIGN}:{self.group_id}",
placeholder="Select Assignee...",
options=get_assign_selector_options(self.group),
)
message = DiscordMessageBuilder(
components=[DiscordActionRow([assign_selector])],
flags=DiscordMessageFlags().set_ephemeral(),
)
return self.send_message(message)
def assign(self) -> Response:
assignee = self.request.get_selected_options()[0]
self.update_group(
{
"assignedTo": assignee,
"integration": ActivityIntegration.DISCORD.value,
}
)
logger.info(
"discord.assign.dialog",
extra={
"assignee": assignee,
"user": self.request.user,
},
)
assert self.request.user is not None
analytics.record(
DiscordIntegrationAssign(
actor_id=self.request.user.id,
)
)
message = DiscordMessageBuilder(
content=ASSIGNEE_UPDATED,
flags=DiscordMessageFlags().set_ephemeral(),
)
return self.send_message(message, update=True)
def resolve_dialog(self) -> Response:
resolve_selector = DiscordSelectMenu(
custom_id=f"{CustomIds.RESOLVE}:{self.group_id}",
placeholder="Select the resolution target",
options=RESOLVE_DIALOG_OPTIONS,
)
message = DiscordMessageBuilder(
components=[DiscordActionRow([resolve_selector])],
flags=DiscordMessageFlags().set_ephemeral(),
)
return self.send_message(message)
def resolve(self) -> Response:
status: dict[str, object] = {
"status": STATUS_TO_STRING_LOOKUP[GroupHistoryStatus.RESOLVED],
}
message = RESOLVED
selected_option = ""
if self.request.is_select_component():
selected_option = self.request.get_selected_options()[0]
if selected_option == "inNextRelease":
status["statusDetails"] = {"inNextRelease": True}
message = RESOLVED_IN_NEXT_RELEASE
elif selected_option == "inCurrentRelease":
status["statusDetails"] = {"inRelease": "latest"}
message = RESOLVED_IN_CURRENT_RELEASE
self.update_group(status)
return self.send_message(message, update=self.request.is_select_component())
def unresolve(self, from_mark_ongoing: bool = False) -> Response:
self.update_group(
{
"status": STATUS_TO_STRING_LOOKUP[GroupHistoryStatus.UNRESOLVED],
"substatus": SUBSTATUS_TO_STR[GroupSubStatus.ONGOING],
}
)
if from_mark_ongoing:
return self.send_message(MARKED_ONGOING)
return self.send_message(UNRESOLVED)
def archive(self) -> Response:
self.update_group(
{
"status": STATUS_TO_STRING_LOOKUP[GroupHistoryStatus.IGNORED],
"substatus": SUBSTATUS_TO_STR[GroupSubStatus.UNTIL_ESCALATING],
}
)
return self.send_message(ARCHIVE_UNTIL_ESCALATES)
def update_group(self, data: Mapping[str, object]) -> None:
if self.group:
try:
analytics.record(
DiscordIntegrationStatus(
organization_id=self.group.organization.id,
user_id=self.user.id,
status=str(data),
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
update_groups(
request=self.request.request, groups=[self.group], user=self.user, data=data
)
def get_assign_selector_options(group: Group) -> list[DiscordSelectMenuOption]:
"""
Helper function for building the new assignee dropdown.
"""
all_members = group.project.get_members_as_rpc_users()
members = list({m.id: m for m in all_members}.values())
teams = group.project.teams.all()
assignee = group.get_assignee()
options = []
# We don't have the luxury of option groups like Slack has, so we will just
# list all the teams and then all the members.
if teams:
team_options = [
DiscordSelectMenuOption(
label=f"#{team.slug}", value=f"team:{team.id}", default=(team == assignee)
)
for team in teams
]
options.extend(sorted(team_options, key=lambda t: t.label))
if members:
member_options = [
DiscordSelectMenuOption(
label=member.get_display_name(),
value=f"user:{member.id}",
default=(member == assignee),
)
for member in members
]
options.extend(sorted(member_options, key=lambda m: m.label))
return options
| DiscordMessageComponentHandler |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial003.py | {
"start": 242,
"end": 548
} | class ____(BaseModel):
username: str
full_name: Union[str, None] = None
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item, user: User, importance: int = Body()):
results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
return results
| User |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1034063,
"end": 1034802
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateOrganizationWebCommitSignoffSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "message", "organization")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the web commit signoff
setting.
"""
organization = sgqlc.types.Field("Organization", graphql_name="organization")
"""The organization with the updated web commit signoff setting."""
| UpdateOrganizationWebCommitSignoffSettingPayload |
python | pennersr__django-allauth | allauth/socialaccount/providers/trainingpeaks/provider.py | {
"start": 242,
"end": 418
} | class ____(ProviderAccount):
def get_profile_url(self):
return "https://app.trainingpeaks.com"
def get_avatar_url(self):
return None
| TrainingPeaksAccount |
python | walkccc__LeetCode | solutions/688. Knight Probability in Chessboard/688.py | {
"start": 0,
"end": 635
} | class ____:
def knightProbability(self, n: int, k: int, row: int, column: int) -> float:
DIRS = ((1, 2), (2, 1), (2, -1), (1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2))
# dp[i][j] := the probability to stand on (i, j)
dp = [[0] * n for _ in range(n)]
dp[row][column] = 1.0
for _ in range(k):
newDp = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
for dx, dy in DIRS:
x = i + dx
y = j + dy
if 0 <= x < n and 0 <= y < n:
newDp[i][j] += dp[x][y]
dp = newDp
return sum(map(sum, dp)) / 8**k
| Solution |
python | django-haystack__django-haystack | test_haystack/test_generic_views.py | {
"start": 181,
"end": 2525
} | class ____(TestCase):
"""Test case for the generic search views."""
def setUp(self):
super().setUp()
self.query = "haystack"
self.request = self.get_request(url="/some/random/url?q={0}".format(self.query))
def test_get_form_kwargs(self):
"""Test getting the search view form kwargs."""
v = SearchView()
v.request = self.request
form_kwargs = v.get_form_kwargs()
self.assertEqual(form_kwargs.get("data").get("q"), self.query)
self.assertEqual(form_kwargs.get("initial"), {})
self.assertTrue("searchqueryset" in form_kwargs)
self.assertTrue("load_all" in form_kwargs)
def test_search_view_response(self):
"""Test the generic SearchView response."""
response = SearchView.as_view()(request=self.request)
context = response.context_data
self.assertEqual(context["query"], self.query)
self.assertEqual(context.get("view").__class__, SearchView)
self.assertEqual(context.get("form").__class__, ModelSearchForm)
self.assertIn("page_obj", context)
self.assertNotIn("page", context)
def test_search_view_form_valid(self):
"""Test the generic SearchView form is valid."""
v = SearchView()
v.kwargs = {}
v.request = self.request
form = v.get_form(v.get_form_class())
response = v.form_valid(form)
context = response.context_data
self.assertEqual(context["query"], self.query)
def test_search_view_form_invalid(self):
"""Test the generic SearchView form is invalid."""
v = SearchView()
v.kwargs = {}
v.request = self.request
form = v.get_form(v.get_form_class())
response = v.form_invalid(form)
context = response.context_data
self.assertTrue("query" not in context)
def get_request(self, url, method="get", data=None, **kwargs):
"""Gets the request object for the view.
:param url: a mock url to use for the request
:param method: the http method to use for the request ('get', 'post',
etc).
"""
factory = RequestFactory()
factory_func = getattr(factory, method)
request = factory_func(url, data=data or {}, **kwargs)
return request
| GenericSearchViewsTestCase |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_subclass.py | {
"start": 62,
"end": 115
} | class ____(Categorical):
pass
| SubclassedCategorical |
python | plotly__plotly.py | plotly/graph_objs/layout/polar/_radialaxis.py | {
"start": 235,
"end": 77748
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.polar"
_path_str = "layout.polar.radialaxis"
_valid_props = {
"angle",
"autorange",
"autorangeoptions",
"autotickangles",
"autotypenumbers",
"calendar",
"categoryarray",
"categoryarraysrc",
"categoryorder",
"color",
"dtick",
"exponentformat",
"gridcolor",
"griddash",
"gridwidth",
"hoverformat",
"labelalias",
"layer",
"linecolor",
"linewidth",
"maxallowed",
"minallowed",
"minexponent",
"minorloglabels",
"nticks",
"range",
"rangemode",
"separatethousands",
"showexponent",
"showgrid",
"showline",
"showticklabels",
"showtickprefix",
"showticksuffix",
"side",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"type",
"uirevision",
"visible",
}
@property
def angle(self):
"""
Sets the angle (in degrees) from which the radial axis is
drawn. Note that by default, radial axis line on the theta=0
line corresponds to a line pointing right (like what
mathematicians prefer). Defaults to the first `polar.sector`
angle.
The 'angle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided and it has a value for both the lower and
upper bound, `autorange` is set to False. Using "min" applies
autorange only to set the minimum. Using "max" applies
autorange only to set the maximum. Using *min reversed* applies
autorange only to set the minimum on a reversed axis. Using
*max reversed* applies autorange only to set the maximum on a
reversed axis. Using "reversed" applies autorange on both ends
and reverses the axis direction.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed', 'min reversed', 'max reversed',
'min', 'max']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
@property
def autorangeoptions(self):
"""
The 'autorangeoptions' property is an instance of Autorangeoptions
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Autorangeoptions`
- A dict of string/value properties that will be passed
to the Autorangeoptions constructor
Returns
-------
plotly.graph_objs.layout.polar.radialaxis.Autorangeoptions
"""
return self["autorangeoptions"]
@autorangeoptions.setter
def autorangeoptions(self, val):
self["autorangeoptions"] = val
@property
def autotickangles(self):
"""
When `tickangle` is set to "auto", it will be set to the first
angle in this array that is large enough to prevent label
overlap.
The 'autotickangles' property is an info array that may be specified as:
* a list of elements where:
The 'autotickangles[i]' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
list
"""
return self["autotickangles"]
@autotickangles.setter
def autotickangles(self, val):
self["autotickangles"] = val
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis, that's specified in the trace or via the
global `layout.calendar`
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean, geometric mean or median
of all the values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'geometric mean ascending',
'geometric mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def layer(self):
"""
Sets the layer on which this axis is displayed. If *above
traces*, this axis is displayed above all the subplot's traces
If *below traces*, this axis is displayed below all the
subplot's traces, but above the grid lines. Useful when used
together with scatter-like traces with `cliponaxis` set to
False to show markers and/or text nodes above this axis.
The 'layer' property is an enumeration that may be specified as:
- One of the following enumeration values:
['above traces', 'below traces']
Returns
-------
Any
"""
return self["layer"]
@layer.setter
def layer(self, val):
self["layer"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def maxallowed(self):
"""
Determines the maximum range of this axis.
The 'maxallowed' property accepts values of any type
Returns
-------
Any
"""
return self["maxallowed"]
@maxallowed.setter
def maxallowed(self, val):
self["maxallowed"] = val
@property
def minallowed(self):
"""
Determines the minimum range of this axis.
The 'minallowed' property accepts values of any type
Returns
-------
Any
"""
return self["minallowed"]
@minallowed.setter
def minallowed(self, val):
self["minallowed"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def minorloglabels(self):
"""
Determines how minor log labels are displayed. If *small
digits*, small digits i.e. 2 or 5 are displayed. If "complete",
complete digits are displayed. If "none", no labels are
displayed.
The 'minorloglabels' property is an enumeration that may be specified as:
- One of the following enumeration values:
['small digits', 'complete', 'none']
Returns
-------
Any
"""
return self["minorloglabels"]
@minorloglabels.setter
def minorloglabels(self, val):
self["minorloglabels"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
Leaving either or both elements `null` impacts the default
`autorange`.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def rangemode(self):
"""
If "tozero", the range extends to 0, regardless of the input
data If "nonnegative", the range is non-negative, regardless of
the input data. If "normal", the range is computed in relation
to the extrema of the input data (same behavior as for
cartesian axes).
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['tozero', 'nonnegative', 'normal']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def side(self):
"""
Determines on which side of radial axis line the tick and tick
labels appear.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['clockwise', 'counterclockwise']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.polar.radialaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.polar.radialaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.layout.polar.radialaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.layout.polar.radial
axis.tickformatstopdefaults), sets the default property values
to use for elements of layout.polar.radialaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.layout.polar.radialaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.polar.radialaxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.layout.polar.radialaxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def type(self):
"""
Sets the axis type. By default, plotly attempts to determined
the axis type by looking into the data of the traces that
referenced the axis in question.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'log', 'date', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def uirevision(self):
"""
Controls persistence of user-driven changes in axis `range`,
`autorange`, `angle`, and `title` if in `editable: true`
configuration. Defaults to `polar<N>.uirevision`.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
    @property
    def _prop_descriptions(self):
        # Machine-generated summary of every RadialAxis property. This string
        # is emitted verbatim in help/validation error messages, so its text
        # is runtime output — do not edit it by hand; regenerate instead.
        return """\
        angle
            Sets the angle (in degrees) from which the radial axis
            is drawn. Note that by default, radial axis line on the
            theta=0 line corresponds to a line pointing right (like
            what mathematicians prefer). Defaults to the first
            `polar.sector` angle.
        autorange
            Determines whether or not the range of this axis is
            computed in relation to the input data. See `rangemode`
            for more info. If `range` is provided and it has a
            value for both the lower and upper bound, `autorange`
            is set to False. Using "min" applies autorange only to
            set the minimum. Using "max" applies autorange only to
            set the maximum. Using *min reversed* applies autorange
            only to set the minimum on a reversed axis. Using *max
            reversed* applies autorange only to set the maximum on
            a reversed axis. Using "reversed" applies autorange on
            both ends and reverses the axis direction.
        autorangeoptions
            :class:`plotly.graph_objects.layout.polar.radialaxis.Au
            torangeoptions` instance or dict with compatible
            properties
        autotickangles
            When `tickangle` is set to "auto", it will be set to
            the first angle in this array that is large enough to
            prevent label overlap.
        autotypenumbers
            Using "strict" a numeric string in trace data is not
            converted to a number. Using *convert types* a numeric
            string in trace data may be treated as a number during
            automatic axis `type` detection. Defaults to
            layout.autotypenumbers.
        calendar
            Sets the calendar system to use for `range` and `tick0`
            if this is a date axis. This does not set the calendar
            for interpreting data on this axis, that's specified in
            the trace or via the global `layout.calendar`
        categoryarray
            Sets the order in which categories on this axis appear.
            Only has an effect if `categoryorder` is set to
            "array". Used with `categoryorder`.
        categoryarraysrc
            Sets the source reference on Chart Studio Cloud for
            `categoryarray`.
        categoryorder
            Specifies the ordering logic for the case of
            categorical variables. By default, plotly uses "trace",
            which specifies the order that is present in the data
            supplied. Set `categoryorder` to *category ascending*
            or *category descending* if order should be determined
            by the alphanumerical order of the category names. Set
            `categoryorder` to "array" to derive the ordering from
            the attribute `categoryarray`. If a category is not
            found in the `categoryarray` array, the sorting
            behavior for that attribute will be identical to the
            "trace" mode. The unspecified categories will follow
            the categories in `categoryarray`. Set `categoryorder`
            to *total ascending* or *total descending* if order
            should be determined by the numerical order of the
            values. Similarly, the order can be determined by the
            min, max, sum, mean, geometric mean or median of all
            the values.
        color
            Sets default for all colors associated with this axis
            all at once: line, font, tick, and grid colors. Grid
            color is lightened by blending this with the plot
            background Individual pieces can override this.
        dtick
            Sets the step in-between ticks on this axis. Use with
            `tick0`. Must be a positive number, or special strings
            available to "log" and "date" axes. If the axis `type`
            is "log", then ticks are set every 10^(n*dtick) where n
            is the tick number. For example, to set a tick mark at
            1, 10, 100, 1000, ... set dtick to 1. To set tick marks
            at 1, 100, 10000, ... set dtick to 2. To set tick marks
            at 1, 5, 25, 125, 625, 3125, ... set dtick to
            log_10(5), or 0.69897000433. "log" has several special
            values; "L<f>", where `f` is a positive number, gives
            ticks linearly spaced in value (but not position). For
            example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
            at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
            small digits between, use "D1" (all digits) or "D2"
            (only 2 and 5). `tick0` is ignored for "D1" and "D2".
            If the axis `type` is "date", then you must convert the
            time to milliseconds. For example, to set the interval
            between ticks to one day, set `dtick` to 86400000.0.
            "date" also has special values "M<n>" gives ticks
            spaced by a number of months. `n` must be a positive
            integer. To set ticks on the 15th of every third month,
            set `tick0` to "2000-01-15" and `dtick` to "M3". To set
            ticks every 4 years, set `dtick` to "M48"
        exponentformat
            Determines a formatting rule for the tick exponents.
            For example, consider the number 1,000,000,000. If
            "none", it appears as 1,000,000,000. If "e", 1e+9. If
            "E", 1E+9. If "power", 1x10^9 (with 9 in a super
            script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
            from "femto" f (10^-15) to "tera" T (10^12). *SI
            extended* covers instead the full SI range from
            "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
            *SI extended* is used and the exponent is beyond the
            above ranges, the formatting rule will automatically be
            switched to the power notation.
        gridcolor
            Sets the color of the grid lines.
        griddash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        gridwidth
            Sets the width (in px) of the grid lines.
        hoverformat
            Sets the hover text formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        labelalias
            Replacement text for specific tick or hover labels. For
            example using {US: 'USA', CA: 'Canada'} changes US to
            USA and CA to Canada. The labels we would have shown
            must match the keys exactly, after adding any
            tickprefix or ticksuffix. For negative numbers the
            minus sign symbol used (U+2212) is wider than the
            regular ascii dash. That means you need to use −1
            instead of -1. labelalias can be used with any axis
            type, and both keys (if needed) and values (if desired)
            can include html-like tags or MathJax.
        layer
            Sets the layer on which this axis is displayed. If
            *above traces*, this axis is displayed above all the
            subplot's traces If *below traces*, this axis is
            displayed below all the subplot's traces, but above the
            grid lines. Useful when used together with scatter-like
            traces with `cliponaxis` set to False to show markers
            and/or text nodes above this axis.
        linecolor
            Sets the axis line color.
        linewidth
            Sets the width (in px) of the axis line.
        maxallowed
            Determines the maximum range of this axis.
        minallowed
            Determines the minimum range of this axis.
        minexponent
            Hide SI prefix for 10^n if |n| is below this number.
            This only has an effect when `tickformat` is "SI" or
            "B".
        minorloglabels
            Determines how minor log labels are displayed. If
            *small digits*, small digits i.e. 2 or 5 are displayed.
            If "complete", complete digits are displayed. If
            "none", no labels are displayed.
        nticks
            Specifies the maximum number of ticks for the
            particular axis. The actual number of ticks will be
            chosen automatically to be less than or equal to
            `nticks`. Has an effect only if `tickmode` is set to
            "auto".
        range
            Sets the range of this axis. If the axis `type` is
            "log", then you must take the log of your desired range
            (e.g. to set the range from 1 to 100, set the range
            from 0 to 2). If the axis `type` is "date", it should
            be date strings, like date data, though Date objects
            and unix milliseconds will be accepted and converted to
            strings. If the axis `type` is "category", it should be
            numbers, using the scale where each category is
            assigned a serial number from zero in the order it
            appears. Leaving either or both elements `null` impacts
            the default `autorange`.
        rangemode
            If "tozero", the range extends to 0, regardless of the
            input data If "nonnegative", the range is non-negative,
            regardless of the input data. If "normal", the range is
            computed in relation to the extrema of the input data
            (same behavior as for cartesian axes).
        separatethousands
            If "true", even 4-digit integers are separated
        showexponent
            If "all", all exponents are shown besides their
            significands. If "first", only the exponent of the
            first tick is shown. If "last", only the exponent of
            the last tick is shown. If "none", no exponents appear.
        showgrid
            Determines whether or not grid lines are drawn. If
            True, the grid lines are drawn at every tick mark.
        showline
            Determines whether or not a line bounding this axis is
            drawn.
        showticklabels
            Determines whether or not the tick labels are drawn.
        showtickprefix
            If "all", all tick labels are displayed with a prefix.
            If "first", only the first tick is displayed with a
            prefix. If "last", only the last tick is displayed with
            a suffix. If "none", tick prefixes are hidden.
        showticksuffix
            Same as `showtickprefix` but for tick suffixes.
        side
            Determines on which side of radial axis line the tick
            and tick labels appear.
        tick0
            Sets the placement of the first tick on this axis. Use
            with `dtick`. If the axis `type` is "log", then you
            must take the log of your starting tick (e.g. to set
            the starting tick to 100, set the `tick0` to 2) except
            when `dtick`=*L<f>* (see `dtick` for more info). If the
            axis `type` is "date", it should be a date string, like
            date data. If the axis `type` is "category", it should
            be a number, using the scale where each category is
            assigned a serial number from zero in the order it
            appears.
        tickangle
            Sets the angle of the tick labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            tick labels vertically.
        tickcolor
            Sets the tick color.
        tickfont
            Sets the tick font.
        tickformat
            Sets the tick label formatting rule using d3 formatting
            mini-languages which are very similar to those in
            Python. For numbers, see:
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
            And for dates see: https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format. We add two items to
            d3's date formatter: "%h" for half of the year as a
            decimal number as well as "%{n}f" for fractional
            seconds with n digits. For example, *2016-10-13
            09:15:23.456* with tickformat "%H~%M~%S.%2f" would
            display "09~15~23.46"
        tickformatstops
            A tuple of :class:`plotly.graph_objects.layout.polar.ra
            dialaxis.Tickformatstop` instances or dicts with
            compatible properties
        tickformatstopdefaults
            When used in a template (as layout.template.layout.pola
            r.radialaxis.tickformatstopdefaults), sets the default
            property values to use for elements of
            layout.polar.radialaxis.tickformatstops
        ticklabelstep
            Sets the spacing between tick labels as compared to the
            spacing between ticks. A value of 1 (default) means
            each tick gets a label. A value of 2 means shows every
            2nd label. A larger value n means only every nth tick
            is labeled. `tick0` determines which labels are shown.
            Not implemented for axes with `type` "log" or
            "multicategory", or when `tickmode` is "array".
        ticklen
            Sets the tick length (in px).
        tickmode
            Sets the tick mode for this axis. If "auto", the number
            of ticks is set via `nticks`. If "linear", the
            placement of the ticks is determined by a starting
            position `tick0` and a tick step `dtick` ("linear" is
            the default value if `tick0` and `dtick` are provided).
            If "array", the placement of the ticks is set via
            `tickvals` and the tick text is `ticktext`. ("array" is
            the default value if `tickvals` is provided).
        tickprefix
            Sets a tick label prefix.
        ticks
            Determines whether ticks are drawn or not. If "", this
            axis' ticks are not drawn. If "outside" ("inside"),
            this axis' are drawn outside (inside) the axis lines.
        ticksuffix
            Sets a tick label suffix.
        ticktext
            Sets the text displayed at the ticks position via
            `tickvals`. Only has an effect if `tickmode` is set to
            "array". Used with `tickvals`.
        ticktextsrc
            Sets the source reference on Chart Studio Cloud for
            `ticktext`.
        tickvals
            Sets the values at which ticks on this axis appear.
            Only has an effect if `tickmode` is set to "array".
            Used with `ticktext`.
        tickvalssrc
            Sets the source reference on Chart Studio Cloud for
            `tickvals`.
        tickwidth
            Sets the tick width (in px).
        title
            :class:`plotly.graph_objects.layout.polar.radialaxis.Ti
            tle` instance or dict with compatible properties
        type
            Sets the axis type. By default, plotly attempts to
            determined the axis type by looking into the data of
            the traces that referenced the axis in question.
        uirevision
            Controls persistence of user-driven changes in axis
            `range`, `autorange`, `angle`, and `title` if in
            `editable: true` configuration. Defaults to
            `polar<N>.uirevision`.
        visible
            A single toggle to hide the axis while preserving
            interaction like dragging. Default is true when a
            cheater plot is present on the axis, otherwise false
        """
def __init__(
self,
arg=None,
angle=None,
autorange=None,
autorangeoptions=None,
autotickangles=None,
autotypenumbers=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
griddash=None,
gridwidth=None,
hoverformat=None,
labelalias=None,
layer=None,
linecolor=None,
linewidth=None,
maxallowed=None,
minallowed=None,
minexponent=None,
minorloglabels=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showexponent=None,
showgrid=None,
showline=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
side=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
type=None,
uirevision=None,
visible=None,
**kwargs,
):
"""
Construct a new RadialAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.polar.RadialAxis`
angle
Sets the angle (in degrees) from which the radial axis
is drawn. Note that by default, radial axis line on the
theta=0 line corresponds to a line pointing right (like
what mathematicians prefer). Defaults to the first
`polar.sector` angle.
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided and it has a
value for both the lower and upper bound, `autorange`
is set to False. Using "min" applies autorange only to
set the minimum. Using "max" applies autorange only to
set the maximum. Using *min reversed* applies autorange
only to set the minimum on a reversed axis. Using *max
reversed* applies autorange only to set the maximum on
a reversed axis. Using "reversed" applies autorange on
both ends and reverses the axis direction.
autorangeoptions
:class:`plotly.graph_objects.layout.polar.radialaxis.Au
torangeoptions` instance or dict with compatible
properties
autotickangles
When `tickangle` is set to "auto", it will be set to
the first angle in this array that is large enough to
prevent label overlap.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
layer
Sets the layer on which this axis is displayed. If
*above traces*, this axis is displayed above all the
subplot's traces If *below traces*, this axis is
displayed below all the subplot's traces, but above the
grid lines. Useful when used together with scatter-like
traces with `cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
maxallowed
Determines the maximum range of this axis.
minallowed
Determines the minimum range of this axis.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
minorloglabels
Determines how minor log labels are displayed. If
*small digits*, small digits i.e. 2 or 5 are displayed.
If "complete", complete digits are displayed. If
"none", no labels are displayed.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears. Leaving either or both elements `null` impacts
the default `autorange`.
rangemode
If "tozero", the range extends to 0, regardless of the
input data If "nonnegative", the range is non-negative,
regardless of the input data. If "normal", the range is
computed in relation to the extrema of the input data
(same behavior as for cartesian axes).
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
side
Determines on which side of radial axis line the tick
and tick labels appear.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.polar.ra
dialaxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.layout.pola
r.radialaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.polar.radialaxis.tickformatstops
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.polar.radialaxis.Ti
tle` instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
uirevision
Controls persistence of user-driven changes in axis
`range`, `autorange`, `angle`, and `title` if in
`editable: true` configuration. Defaults to
`polar<N>.uirevision`.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
Returns
-------
RadialAxis
"""
super().__init__("radialaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.polar.RadialAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.polar.RadialAxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("angle", arg, angle)
self._set_property("autorange", arg, autorange)
self._set_property("autorangeoptions", arg, autorangeoptions)
self._set_property("autotickangles", arg, autotickangles)
self._set_property("autotypenumbers", arg, autotypenumbers)
self._set_property("calendar", arg, calendar)
self._set_property("categoryarray", arg, categoryarray)
self._set_property("categoryarraysrc", arg, categoryarraysrc)
self._set_property("categoryorder", arg, categoryorder)
self._set_property("color", arg, color)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("griddash", arg, griddash)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("layer", arg, layer)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._set_property("minexponent", arg, minexponent)
self._set_property("minorloglabels", arg, minorloglabels)
self._set_property("nticks", arg, nticks)
self._set_property("range", arg, range)
self._set_property("rangemode", arg, rangemode)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("side", arg, side)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("type", arg, type)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| RadialAxis |
python | django__django | tests/inspectdb/models.py | {
"start": 4725,
"end": 5084
} | class ____(models.Model):
name = models.CharField(max_length=255)
rank = models.IntegerField()
class Meta:
constraints = [
models.UniqueConstraint(
Lower("name"), models.F("rank"), name="index_lower_name"
)
]
required_db_features = {"supports_expression_indexes"}
| FuncUniqueConstraint |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 231213,
"end": 232704
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(
self,
client_id: str,
client_secret: str,
access_token: str,
refresh_token: str,
credentials: Optional[str] = None,
):
self.credentials = check.opt_str_param(credentials, "credentials")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.access_token = check.str_param(access_token, "access_token")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class AccessToken:
@public
def __init__(self, access_token: str, credentials: Optional[str] = None):
self.credentials = check.opt_str_param(credentials, "credentials")
self.access_token = check.str_param(access_token, "access_token")
@public
def __init__(
self, name: str, credentials: Union["DriftSource.OAuth20", "DriftSource.AccessToken"]
):
"""Airbyte Source for Drift.
Documentation can be found at https://docs.airbyte.com/integrations/sources/drift
Args:
name (str): The name of the destination.
"""
self.credentials = check.inst_param(
credentials, "credentials", (DriftSource.OAuth20, DriftSource.AccessToken)
)
super().__init__("Drift", name)
| DriftSource |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 14026,
"end": 15471
} | class ____(IntegrationBase, unittest.TestCase):
# make sure views registered for a route "win" over views registered
# without one, even though the context of the non-route view may
# be more specific than the route view.
package = 'tests.pkgs.hybridapp'
def test_root(self):
res = self.testapp.get('/', status=200)
self.assertEqual(res.body, b'global')
def test_abc(self):
res = self.testapp.get('/abc', status=200)
self.assertEqual(res.body, b'route')
def test_def(self):
res = self.testapp.get('/def', status=200)
self.assertEqual(res.body, b'route2')
def test_ghi(self):
res = self.testapp.get('/ghi', status=200)
self.assertEqual(res.body, b'global')
def test_jkl(self):
self.testapp.get('/jkl', status=404)
def test_mno(self):
self.testapp.get('/mno', status=404)
def test_pqr_global2(self):
res = self.testapp.get('/pqr/global2', status=200)
self.assertEqual(res.body, b'global2')
def test_error(self):
res = self.testapp.get('/error', status=200)
self.assertEqual(res.body, b'supressed')
def test_error2(self):
res = self.testapp.get('/error2', status=200)
self.assertEqual(res.body, b'supressed2')
def test_error_sub(self):
res = self.testapp.get('/error_sub', status=200)
self.assertEqual(res.body, b'supressed2')
| TestHybridApp |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensor_dry_run.py | {
"start": 537,
"end": 739
} | class ____(graphene.Union):
class Meta:
types = (GraphenePythonError, GrapheneSensorNotFoundError, GrapheneDryRunInstigationTick)
name = "SensorDryRunResult"
| GrapheneSensorDryRunResult |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 8570,
"end": 8726
} | class ____(AtomicRule):
"""integrate(1/sqrt(1-x**2), x) -> asin(x)"""
def eval(self) -> Expr:
return asin(self.variable)
@dataclass
| ArcsinRule |
python | vyperlang__vyper | vyper/evm/assembler/instructions.py | {
"start": 3565,
"end": 4672
} | class ____:
def __init__(self, label: Label | CONSTREF, ofst: int):
# label can be Label or CONSTREF
assert isinstance(label, (Label, CONSTREF))
self.label = label
self.ofst = ofst
def __repr__(self):
label = self.label
if isinstance(label, Label):
label = label.label # str
return f"PUSH_OFST({label}, {self.ofst})"
def __eq__(self, other):
if not isinstance(other, PUSH_OFST):
return False
return self.label == other.label and self.ofst == other.ofst
def __hash__(self):
return hash((self.label, self.ofst))
def JUMP(label: Label):
return [PUSHLABEL(label), "JUMP"]
def JUMPI(label: Label):
return [PUSHLABEL(label), "JUMPI"]
def mkdebug(pc_debugger, ast_source):
# compile debug instructions
# (this is dead code -- CMC 2025-05-08)
i = TaggedInstruction("DEBUG", ast_source)
i.pc_debugger = pc_debugger
return [i]
AssemblyInstruction = (
str | TaggedInstruction | int | PUSHLABEL | Label | PUSH_OFST | DATA_ITEM | DataHeader | CONST
)
| PUSH_OFST |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/core/dbt_cli_invocation.py | {
"start": 2059,
"end": 17505
} | class ____:
"""The representation of an invoked dbt command.
Args:
process (subprocess.Popen): The process running the dbt command.
manifest (Mapping[str, Any]): The dbt manifest blob.
project (Optional[DbtProject]): The dbt project.
project_dir (Path): The path to the dbt project.
target_path (Path): The path to the dbt target folder.
raise_on_error (bool): Whether to raise an exception if the dbt command fails.
"""
process: subprocess.Popen
manifest: Mapping[str, Any]
dagster_dbt_translator: DagsterDbtTranslator
project_dir: Path
target_path: Path
raise_on_error: bool
cli_version: version.Version
project: Optional[DbtProject] = field(default=None)
context: Optional[Union[OpExecutionContext, AssetExecutionContext]] = field(
default=None, repr=False
)
termination_timeout_seconds: float = field(
init=False, default=DAGSTER_DBT_TERMINATION_TIMEOUT_SECONDS
)
adapter: Optional[BaseAdapter] = field(default=None)
postprocessing_threadpool_num_threads: int = field(
init=False, default=DEFAULT_EVENT_POSTPROCESSING_THREADPOOL_SIZE
)
_stdout: list[Union[str, dict[str, Any]]] = field(init=False, default_factory=list)
_error_messages: list[str] = field(init=False, default_factory=list)
# Caches fetching relation column metadata to avoid redundant queries to the database.
_relation_column_metadata_cache: dict[RelationKey, RelationData] = field(
init=False, default_factory=dict
)
def _get_columns_from_dbt_resource_props(
self, adapter: BaseAdapter, dbt_resource_props: dict[str, Any]
) -> RelationData:
"""Given a dbt resource properties dictionary, fetches the resource's column metadata from
the database, or returns the cached metadata if it has already been fetched.
"""
relation_key = RelationKey(
database=dbt_resource_props["database"],
schema=dbt_resource_props["schema"],
identifier=(
dbt_resource_props["identifier"]
if dbt_resource_props["unique_id"].startswith("source")
else dbt_resource_props["alias"]
),
)
if relation_key in self._relation_column_metadata_cache:
return self._relation_column_metadata_cache[relation_key]
relation = _get_relation_from_adapter(adapter=adapter, relation_key=relation_key)
cols: list = adapter.get_columns_in_relation(relation=relation)
return self._relation_column_metadata_cache.setdefault(
relation_key, RelationData(name=str(relation), columns=cols)
)
@classmethod
def run(
cls,
args: Sequence[str],
env: dict[str, str],
manifest: Mapping[str, Any],
dagster_dbt_translator: DagsterDbtTranslator,
project_dir: Path,
target_path: Path,
raise_on_error: bool,
context: Optional[Union[OpExecutionContext, AssetExecutionContext]],
adapter: Optional[BaseAdapter],
cli_version: version.Version,
dbt_project: Optional[DbtProject] = None,
) -> "DbtCliInvocation":
# Attempt to take advantage of partial parsing. If there is a `partial_parse.msgpack` in
# in the target folder, then copy it to the dynamic target path.
#
# This effectively allows us to skip the parsing of the manifest, which can be expensive.
# See https://docs.getdbt.com/reference/programmatic-invocations#reusing-objects for more
# details.
current_target_path = _get_dbt_target_path()
partial_parse_file_path = (
current_target_path.joinpath(PARTIAL_PARSE_FILE_NAME)
if current_target_path.is_absolute()
else project_dir.joinpath(current_target_path, PARTIAL_PARSE_FILE_NAME)
)
partial_parse_destination_target_path = target_path.joinpath(PARTIAL_PARSE_FILE_NAME)
if partial_parse_file_path.exists() and not partial_parse_destination_target_path.exists():
logger.info(
f"Copying `{partial_parse_file_path}` to `{partial_parse_destination_target_path}`"
" to take advantage of partial parsing."
)
partial_parse_destination_target_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(partial_parse_file_path, partial_parse_destination_target_path)
# Create a subprocess that runs the dbt CLI command.
process = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
cwd=project_dir,
)
dbt_cli_invocation = cls(
process=process,
manifest=manifest,
project=dbt_project,
dagster_dbt_translator=dagster_dbt_translator,
project_dir=project_dir,
target_path=target_path,
raise_on_error=raise_on_error,
context=context,
adapter=adapter,
cli_version=cli_version,
)
logger.info(f"Running dbt command: `{dbt_cli_invocation.dbt_command}`.")
return dbt_cli_invocation
@public
def wait(self) -> "DbtCliInvocation":
"""Wait for the dbt CLI process to complete.
Returns:
DbtCliInvocation: The current representation of the dbt CLI invocation.
Examples:
.. code-block:: python
from dagster_dbt import DbtCliResource
dbt = DbtCliResource(project_dir="/path/to/dbt/project")
dbt_cli_invocation = dbt.cli(["run"]).wait()
"""
list(self.stream_raw_events())
return self
@public
def is_successful(self) -> bool:
"""Return whether the dbt CLI process completed successfully.
Returns:
bool: True, if the dbt CLI process returns with a zero exit code, and False otherwise.
Examples:
.. code-block:: python
from dagster_dbt import DbtCliResource
dbt = DbtCliResource(project_dir="/path/to/dbt/project")
dbt_cli_invocation = dbt.cli(["run"], raise_on_error=False)
if dbt_cli_invocation.is_successful():
...
"""
self._stdout = list(self._stream_stdout())
return self.process.wait() == 0 and not self._error_messages
@public
def get_error(self) -> Optional[Exception]:
"""Return an exception if the dbt CLI process failed.
Returns:
Optional[Exception]: An exception if the dbt CLI process failed, and None otherwise.
Examples:
.. code-block:: python
from dagster_dbt import DbtCliResource
dbt = DbtCliResource(project_dir="/path/to/dbt/project")
dbt_cli_invocation = dbt.cli(["run"], raise_on_error=False)
error = dbt_cli_invocation.get_error()
if error:
logger.error(error)
"""
if self.is_successful():
return None
log_path = self.target_path.joinpath("dbt.log")
extra_description = ""
if log_path.exists():
extra_description = f", or view the dbt debug log: {log_path}"
return DagsterDbtCliRuntimeError(
description=(
f"The dbt CLI process with command\n\n"
f"`{self.dbt_command}`\n\n"
f"failed with exit code `{self.process.returncode}`."
" Check the stdout in the Dagster compute logs for the full information about"
f" the error{extra_description}.{self._format_error_messages()}"
),
)
def _stream_asset_events(
self,
) -> Iterator[DbtDagsterEventType]:
"""Stream the dbt CLI events and convert them to Dagster events."""
for event in self.stream_raw_events():
yield from event.to_default_asset_events(
manifest=self.manifest,
dagster_dbt_translator=self.dagster_dbt_translator,
context=self.context,
target_path=self.target_path,
project=self.project,
)
@public
def stream(
self,
) -> "DbtEventIterator[Union[Output, AssetMaterialization, AssetObservation, AssetCheckResult, AssetCheckEvaluation]]":
"""Stream the events from the dbt CLI process and convert them to Dagster events.
Returns:
Iterator[Union[Output, AssetMaterialization, AssetObservation, AssetCheckResult, AssetCheckEvaluation]]:
A set of corresponding Dagster events.
In a Dagster asset definition, the following are yielded:
- Output for refables (e.g. models, seeds, snapshots.)
- AssetCheckResult for dbt test results that are enabled as asset checks.
- AssetObservation for dbt test results that are not enabled as asset checks.
In a Dagster op definition, the following are yielded:
- AssetMaterialization refables (e.g. models, seeds, snapshots.)
- AssetCheckEvaluation for dbt test results that are enabled as asset checks.
- AssetObservation for dbt test results that are not enabled as asset checks.
Examples:
.. code-block:: python
from pathlib import Path
from dagster_dbt import DbtCliResource, dbt_assets
@dbt_assets(manifest=Path("target", "manifest.json"))
def my_dbt_assets(context, dbt: DbtCliResource):
yield from dbt.cli(["run"], context=context).stream()
"""
return DbtEventIterator(
self._stream_asset_events(),
self,
)
@public
def stream_raw_events(self) -> Iterator[DbtCliEventMessage]:
"""Stream the events from the dbt CLI process.
Returns:
Iterator[DbtCliEventMessage]: An iterator of events from the dbt CLI process.
"""
event_history_metadata_by_unique_id: dict[str, dict[str, Any]] = {}
for raw_event in self._stdout or self._stream_stdout():
if isinstance(raw_event, str):
# If we can't parse the event, then just emit it as a raw log.
sys.stdout.write(raw_event + "\n")
sys.stdout.flush()
continue
unique_id: Optional[str] = raw_event["data"].get("node_info", {}).get("unique_id")
if self.cli_version.major < 2:
event = DbtCoreCliEventMessage(raw_event=raw_event, event_history_metadata={})
else:
event = DbtFusionCliEventMessage(raw_event=raw_event, event_history_metadata={})
if unique_id and event.is_result_event:
event_history_metadata = copy.deepcopy(
event_history_metadata_by_unique_id.get(unique_id, {})
)
event = replace(event, event_history_metadata=event_history_metadata)
# Attempt to parse the column level metadata from the event message.
# If it exists, save it as historical metadata to attach to the NodeFinished event.
if event.raw_event["info"]["name"] == "JinjaLogInfo":
with contextlib.suppress(orjson.JSONDecodeError):
column_level_metadata = orjson.loads(event.raw_event["info"]["msg"])
event_history_metadata_by_unique_id[cast("str", unique_id)] = (
column_level_metadata
)
# Don't show this message in stdout
continue
# Re-emit the logs from dbt CLI process into stdout.
sys.stdout.write(str(event) + "\n")
sys.stdout.flush()
yield event
# Ensure that the dbt CLI process has completed.
self._raise_on_error()
@public
def get_artifact(
self,
artifact: Union[
Literal["manifest.json"],
Literal["catalog.json"],
Literal["run_results.json"],
Literal["sources.json"],
],
) -> dict[str, Any]:
"""Retrieve a dbt artifact from the target path.
See https://docs.getdbt.com/reference/artifacts/dbt-artifacts for more information.
Args:
artifact (Union[Literal["manifest.json"], Literal["catalog.json"], Literal["run_results.json"], Literal["sources.json"]]): The name of the artifact to retrieve.
Returns:
Dict[str, Any]: The artifact as a dictionary.
Examples:
.. code-block:: python
from dagster_dbt import DbtCliResource
dbt = DbtCliResource(project_dir="/path/to/dbt/project")
dbt_cli_invocation = dbt.cli(["run"]).wait()
# Retrieve the run_results.json artifact.
run_results = dbt_cli_invocation.get_artifact("run_results.json")
"""
artifact_path = self.target_path.joinpath(artifact)
return orjson.loads(artifact_path.read_bytes())
@property
def dbt_command(self) -> str:
"""The dbt CLI command that was invoked."""
return " ".join(cast("Sequence[str]", self.process.args))
def _stream_stdout(self) -> Iterator[Union[str, dict[str, Any]]]:
"""Stream the stdout from the dbt CLI process."""
try:
if not self.process.stdout or self.process.stdout.closed:
return
with self.process.stdout:
for raw_line in self.process.stdout or []:
raw_event_str = raw_line.decode().strip()
try:
raw_event = orjson.loads(raw_event_str)
# Parse the error message from the event, if it exists.
is_error_message = raw_event["info"]["level"] == "error"
if is_error_message:
self._error_messages.append(raw_event["info"]["msg"])
yield raw_event
except:
yield raw_event_str
except DagsterExecutionInterruptedError:
logger.info(f"Forwarding interrupt signal to dbt command: `{self.dbt_command}`.")
self.process.send_signal(signal.SIGINT)
self.process.wait(timeout=self.termination_timeout_seconds)
logger.info(f"dbt process terminated with exit code `{self.process.returncode}`.")
raise
def _format_error_messages(self) -> str:
"""Format the error messages from the dbt CLI process."""
if not self._error_messages:
return ""
return "\n\n".join(
[
"",
"Errors parsed from dbt logs:",
*self._error_messages,
]
)
def _raise_on_error(self) -> None:
"""Ensure that the dbt CLI process has completed. If the process has not successfully
completed, then optionally raise an error.
"""
logger.info(f"Finished dbt command: `{self.dbt_command}`.")
error = self.get_error()
if error and self.raise_on_error:
raise error
| DbtCliInvocation |
python | pytorch__pytorch | torch/_inductor/runtime/benchmarking.py | {
"start": 11472,
"end": 17551
} | class ____(TritonBenchmarker): # noqa: docstring_linter
@cached_property
def L2_cache_size(self: Self) -> int:
"""Get the L2 cache size, in bytes, of the current device."""
device = torch.cuda.current_device()
props = torch.cuda.get_device_properties(device)
return props.L2_cache_size
def get_event_pairs(
self: Self, iters: int
) -> list[tuple[torch.cuda.Event, torch.cuda.Event]]:
"""Get `iters` pairs of CUDA events."""
return [
(
torch.cuda.Event(enable_timing=True),
torch.cuda.Event(enable_timing=True),
)
for _ in range(iters)
]
def get_event_pairs_min_timing(
self: Self, event_pairs: list[tuple[torch.cuda.Event, torch.cuda.Event]]
) -> float:
"""Get the minimum timing, in milliseconds, for a group of CUDA event pairs."""
return min(
[
start_event.elapsed_time(end_event)
for start_event, end_event in event_pairs
]
)
@may_distort_benchmarking_result
@time_and_count
def benchmark_gpu( # type: ignore[override]
self: Self,
_callable: Callable[[], Any],
estimation_iters: int = 5,
memory_warmup_iters: int = 100,
benchmark_iters: int = 100,
max_benchmark_duration: int = 25,
return_mode: str = "min",
grad_to_none: list[torch.Tensor] | None = None,
is_vetted_benchmarking: bool = False,
**kwargs: Any,
) -> float | list[float]:
"""Benchmark a GPU callable using a custom benchmarking implementation.
Arguments:
- _callable: The callable to benchmark.
Keyword Arguments:
- estimation_iters: Optionally, the number of iterations to run `_callable`
during runtime estimation.
- memory_warmup_iters: Optionally, the number of iterations to flush the L2
cache before starting benchmarking.
- benchmark_iters: Optionally, the number of iterations to run `_callable`
during the benchmarking.
- max_benchmark_duration: Optionally, the maximum duration of the benchmarking,
in milliseconds. An estimated duration is calculated based on the values
of `memory_warmup_iters` and `benchmark_iters`, along with the estimated
runtime of `_callable` and various other factors, and we then shrink
`benchmark_iters` to fit in the allotted maximum duration.
- return_mode: Return mode for benchmark results. Options are "min" (default),
"all" (returns all measurements).
- grad_to_none: Optionally, a list of tensors whose gradients should be cleared
before each benchmark iteration.
- is_vetted_benchmarking: in deterministic mode, we only allow
benchmarking in vetted cases.
- **kwargs: Additional kwargs that may be passed to the fallback.
Returns:
- If return_mode="min": The minimum runtime of `_callable`, in milliseconds.
- If return_mode="all": List of all runtime measurements, in milliseconds.
"""
if not is_vetted_benchmarking:
may_ban_benchmarking()
# we don't want any outside errors propagating into benchmarking
torch.cuda.synchronize()
# warmup `_callable` (and catches any failures in the process)
_callable()
torch.cuda.synchronize()
# see https://github.com/triton-lang/triton/pull/840 for why `dtype=torch.int`
buffer = torch.empty(self.L2_cache_size // 4, dtype=torch.int, device="cuda")
buffer.zero_()
# estimate the runtime of `_callable`
event_pairs = self.get_event_pairs(estimation_iters)
for start_event, end_event in event_pairs:
# Clear gradients before timing (matches triton.testing.do_bench)
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
buffer.zero_()
start_event.record()
_callable()
end_event.record()
torch.cuda.synchronize()
estimated_timing = self.get_event_pairs_min_timing(event_pairs)
# adjust `benchmark_iters` to fit in the maximum benchmarking duration
benchmark_iters = max(
min(benchmark_iters, int(max_benchmark_duration // estimated_timing)), 1
)
# do the memory warmup
for _ in range(memory_warmup_iters):
buffer.zero_()
# benchmark `_callable`
event_pairs = self.get_event_pairs(benchmark_iters)
for start_event, end_event in event_pairs:
# Clear gradients before timing (matches triton.testing.do_bench)
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
buffer.zero_()
start_event.record()
_callable()
end_event.record()
torch.cuda.synchronize()
# explicitly delete the buffer, sometimes helps memory
# footprint metrics in OSS Inductor performance benchmarks
del buffer
# Return based on the requested mode
if return_mode == "all":
# Get all timings from event pairs
all_timings = [
start_event.elapsed_time(end_event)
for start_event, end_event in event_pairs
]
return all_timings
elif return_mode == "min":
benchmarked_timing = self.get_event_pairs_min_timing(event_pairs)
# return the minimum of `estimated_timing` and `benchmarked_timing`,
# we just want the minimum timing overall so we might as well check both
return min(estimated_timing, benchmarked_timing)
else:
raise ValueError(
f"Unsupported return_mode: {return_mode}. Use 'min' or 'all'."
)
benchmarker = (
InductorBenchmarker() if use_experimental_benchmarker else TritonBenchmarker()
)
| InductorBenchmarker |
python | doocs__leetcode | solution/1000-1099/1094.Car Pooling/Solution.py | {
"start": 0,
"end": 285
} | class ____:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
mx = max(e[2] for e in trips)
d = [0] * (mx + 1)
for x, f, t in trips:
d[f] += x
d[t] -= x
return all(s <= capacity for s in accumulate(d))
| Solution |
python | kamyu104__LeetCode-Solutions | Python/longest-increasing-subsequence.py | {
"start": 556,
"end": 1364
} | class ____(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
LIS = []
def insert(target):
left, right = 0, len(LIS) - 1
# Find the first index "left" which satisfies LIS[left] >= target
while left <= right:
mid = left + (right - left) // 2
if LIS[mid] >= target:
right = mid - 1
else:
left = mid + 1
# If not found, append the target.
if left == len(LIS):
LIS.append(target)
else:
LIS[left] = target
for num in nums:
insert(num)
return len(LIS)
# Time: O(nlogn)
# Space: O(n)
# bit, fenwick tree
| Solution2 |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 9333,
"end": 9405
} | class ____(HTTPClientError):
status_code = 422
| HTTPUnprocessableEntity |
python | pallets__jinja | src/jinja2/loaders.py | {
"start": 1029,
"end": 5490
} | class ____:
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with open(path) as f:
source = f.read()
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(
self, environment: "Environment", template: str
) -> tuple[str, str | None, t.Callable[[], bool] | None]:
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a string. The filename should be the name of the
file on the filesystem if it was loaded from there, otherwise
``None``. The filename is used by Python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError(
f"{type(self).__name__} cannot provide access to the source"
)
raise TemplateNotFound(template)
def list_templates(self) -> list[str]:
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError("this loader cannot iterate over all templates")
@internalcode
def load(
self,
environment: "Environment",
name: str,
globals: t.MutableMapping[str, t.Any] | None = None,
) -> "Template":
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(
environment, code, globals, uptodate
)
| BaseLoader |
python | nedbat__coveragepy | coverage/types.py | {
"start": 4866,
"end": 5011
} | class ____(Protocol):
"""What all plugins have in common."""
_coverage_plugin_name: str
_coverage_enabled: bool
## Debugging
| TPlugin |
python | scrapy__scrapy | tests/test_scheduler.py | {
"start": 9921,
"end": 10121
} | class ____(Spider):
def __init__(self, start_urls):
self.start_urls = start_urls
super().__init__(name="StartUrlsSpider")
def parse(self, response):
pass
| StartUrlsSpider |
python | huggingface__transformers | utils/modular_model_converter.py | {
"start": 62923,
"end": 102853
} | class ____(ModuleMapper):
"""This is a Mapper to visit a modular file (like `modular_llama.py`). It visits the whole file, recording dependency,
then visits all imported modeling files (like `modeling_llama.py`), and manages their mutual dependencies.
Calling the method `create_modules()` after visit will create all modules based on this modular file.
"""
def __init__(self, python_module, new_name, package_name):
super().__init__(python_module)
# fmt: off
self.model_name = new_name # name of the model being defined. Should be in the format of `llama` or `layout_xlm` or `phi3`
self.model_specific_imported_objects: dict[str, str] = {} # e.g. {"LlamaModel": "transformers.models.llama.modeling_llama"}
self.model_specific_modules: dict[str, cst.Module] = {} # e.g. {"transformers.models.llama.modeling_llama": cst.Module}
self.all_all_to_add = {}
self.excluded_external_files = {} if package_name == "transformers" else EXCLUDED_EXTERNAL_FILES[package_name]
# fmt: on
def visit_ImportFrom(self, node: cst.ImportFrom) -> None:
"""When visiting imports from modeling files (i.e. `transformers.models.xxx`) we get the code, parse it,
and save it in `self.model_specific_modules` to later visit. The imported objects are saved in `self.model_specific_imported_objects`.
"""
# `node.module` is None for fully relative imports, e.g. `from ... import initialization as init`
import_module = self.python_module.code_for_node(node.module) if node.module is not None else ""
import_statement = "." * len(node.relative) + import_module
if any(import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR):
return
if m.matches(node.module, m.Attribute()):
for imported_ in node.names:
# If we match here, it's an import from 3rd party lib that we need to skip
if any(external_file["name"] in import_statement for external_file in self.excluded_external_files):
continue
_import = re.search(
rf"(?:transformers\.models\.)|(?:\.\.\.models\.)|(?:\.\.)\w+\.({self.match_patterns}).*",
import_statement,
)
if _import:
source = _import.group(1)
if source == "modeling" and "Config" in self.python_module.code_for_node(imported_):
raise ValueError(
f"You are importing {self.python_module.code_for_node(imported_)} from the modeling file. Import from the `configuration_xxxx.py` file instead"
)
if import_module not in self.model_specific_modules:
if "models" not in import_module:
import_module = "models." + import_module
if not import_module.startswith("transformers"):
import_module = "transformers." + import_module
try:
source_code = get_module_source_from_name(import_module)
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
f"Failed to visit import from for: {self.python_module.code_for_node(node)}. Tried to import {import_module} but failed."
) from e
tree = cst.parse_module(source_code)
self.model_specific_modules[import_module] = tree
imported_object = self.python_module.code_for_node(imported_.name)
self.model_specific_imported_objects[imported_object] = import_module
if m.matches(node.module, m.Name()):
if "transformers" == import_module:
raise ValueError(
f"You are importing from {import_module} directly using global imports. Import from the correct local path"
)
def visit_SimpleStatementLine(self, node):
"""If we visit an import statement not previously visited, record it. If we visit a module-scope assignment,
simply record it or, if it is `__all__`, split it between files where we should dispatch it.
"""
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
simple_top_level_assign_structure = m.SimpleStatementLine(
body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])]
)
simple_top_level_variable_indexing = m.SimpleStatementLine(
body=[m.Assign(targets=[m.AssignTarget(target=m.Subscript(value=m.Name()) | m.Attribute(value=m.Name()))])]
)
if m.matches(parent_node, m.Module()):
if m.matches(node, m.SimpleStatementLine(body=[m.Import()])):
self.imports.append(node)
elif m.matches(node, m.SimpleStatementLine(body=[m.ImportFrom()])):
# `node.body[0].module` is None for fully relative imports, e.g. `from ... import initialization as init`
import_module = (
self.python_module.code_for_node(node.body[0].module) if node.body[0].module is not None else ""
)
import_statement = "." * len(node.body[0].relative) + import_module
if any(
external_file["name"] in import_statement for external_file in self.excluded_external_files
) or not (
re.search(rf"(?:transformers\.models\.)|(?:\.\.)\w+\.({self.match_patterns}).*", import_statement)
and not any(import_to_skip in import_statement for import_to_skip in IMPORTS_TO_SKIP_IN_MODULAR)
):
self.imports.append(node)
elif m.matches(node, simple_top_level_assign_structure):
assigned_variable = node.body[0].targets[0].target.value
# __all__ is treated differently and not added to general assignments
if assigned_variable == "__all__":
self.all_all_to_add = split_all_assignment(node, self.model_name)
else:
self.current_assignment = assigned_variable
self.assignments[assigned_variable] = node
# This corresponds to a global variable being indexed or having an attribute look-up
elif m.matches(node, simple_top_level_variable_indexing):
indexed_variable = node.body[0].targets[0].target.value.value
# We should follow any dependencies relative to the variable being indexed
self.current_assignment = indexed_variable
# The indexing node should be directly added as a dependency of the indexed variable (register the node with a "fake" name)
node_name = self.python_module.code_for_node(node)
self.assignments[node_name] = node
self.object_dependency_mapping[indexed_variable].add(node_name)
    def leave_Module(self, node):
        """When we leave the modular file, we do the following in order:
        1. for each modeling file found in the imports, rename it with the new model name, visit it, and update
        its dependency graph with the new function and assignment definitions found in the modular
        2. update the modular dependency graph with the imported functions and assignments (found when visiting the matching files)
        3. compute the nested (recursive) function and assignment dependencies

        Side effects: populates `self.visited_modules`, `self.renamers`,
        `self.object_recursive_dependency_mapping` and `self.imported_objects_per_file`.
        """
        # Takes care of finalizing our visit
        super().leave_Module(node)
        # 1. for each modeling file found in the imports, rename it with the new model name, visit it, and update dependencies
        self.visited_modules = {}
        self.renamers = {}
        name_prefixes = self.infer_new_model_name()
        for file, module in self.model_specific_modules.items():
            # Module names look like "transformers.models.<model>.<file_type>_<model>", so [-2] is the old model name
            file_model_name = file.split(".")[-2]
            new_name = name_prefixes[file]
            renamer = ReplaceNameTransformer(file_model_name, new_name, self.model_name)
            renamed_module = module.visit(renamer)
            self.visited_modules[file] = ModelFileMapper.visit_and_merge_dependencies(
                renamed_module,
                self.classes,
                self.functions,
                self.assignments,
                self.object_dependency_mapping,
                self.start_lines,
            )
            # We record it so that we can rename classes later the exact same way
            self.renamers[file] = renamer
        # 2. in turn, we need to add the imported functions/assignments to the dependencies of the modular mapper, using the
        # definitions found in the visited files
        self.merge_model_specific_imports(self.visited_modules)
        # 3. compute the nested (recursive) function and assignment dependencies
        self.object_recursive_dependency_mapping = self._compute_recursive_object_dependencies()
        # We need to keep track of which objects were imported directly into which modeling file to not add them wrongly later
        # Note that we may visit several of the same file types, thus we save them per file type, not file
        self.imported_objects_per_file = defaultdict(set)
        for file, mapper in self.visited_modules.items():
            # Extract the file type (the part of the last module component matching `self.match_patterns`)
            file_type = re.search(rf"^transformers\.models\.\w+\.({self.match_patterns})", file).group(1)
            # If there are excluded external files, override the file type if there is a match
            if self.excluded_external_files:
                for excluded_file in self.excluded_external_files:
                    if file.split(".")[-1] == excluded_file["name"]:
                        file_type = excluded_file["type"]
                        break
            self.imported_objects_per_file[file_type].update(mapper.objects_imported_from_modeling)
    def merge_model_specific_imports(self, visited_modules):
        """Merge the functions and assignments imported from the modeling files to the modular nodes and dependency graph,
        based on the visited files.

        Side effects: fills `self.start_lines_file_mapping` and `self.added_objects_file_mapping`, extends
        `self.functions`/`self.assignments` with imported definitions, and rebuilds `self.global_nodes`.
        """
        self.start_lines_file_mapping = {}
        self.added_objects_file_mapping = {}
        for object_name, file in self.model_specific_imported_objects.items():
            visited_module = visited_modules[file]
            self.start_lines_file_mapping[file] = visited_module.start_lines
            # Add functions and their dependencies
            if object_name in visited_module.functions and object_name not in self.functions:
                self.functions[object_name] = visited_module.functions[object_name]
                self.added_objects_file_mapping[object_name] = file
                dependencies = visited_module.object_dependency_mapping.get(object_name, None)
                if dependencies is not None:
                    self.object_dependency_mapping[object_name] = dependencies
                    for dep in dependencies:
                        # Only record dependencies we have not already seen anywhere
                        if dep not in self.global_nodes:
                            self.added_objects_file_mapping[dep] = file
                            self.functions[dep] = visited_module.global_nodes[dep]
                # Add/overwrite the imported functions to other visited modules as well, in case it is absent/different
                # in the modeling source file of the inherited class. See `examples/modular-transformers/modular_switch_function.py`
                # and `examples/modular-transformers/modular_add_function.py` for examples
                recursive_dependencies = visited_module.object_recursive_dependency_mapping.get(object_name, set())
                node_recursive_dependencies_mapping = {
                    dep: visited_module.global_nodes[dep] for dep in recursive_dependencies
                }
                for filename, module_mapper in self.visited_modules.items():
                    if filename != file:
                        module_mapper.global_nodes[object_name] = visited_module.functions[object_name]
                        if len(recursive_dependencies) > 0:
                            module_mapper.object_recursive_dependency_mapping[object_name] = recursive_dependencies
                            module_mapper.global_nodes.update(node_recursive_dependencies_mapping)
            # Add assignments and their dependencies
            elif object_name in visited_module.assignments and object_name not in self.assignments:
                self.assignments[object_name] = visited_module.assignments[object_name]
                self.added_objects_file_mapping[object_name] = file
                dependencies = visited_module.object_dependency_mapping.get(object_name, None)
                if dependencies is not None:
                    self.object_dependency_mapping[object_name] = dependencies
                    for dep in dependencies:
                        if dep not in self.global_nodes:
                            self.added_objects_file_mapping[dep] = file
                            self.assignments[dep] = visited_module.global_nodes[dep]
        # Do not forget to re-assign all nodes after the merge
        self.global_nodes = {**self.assignments, **self.classes, **self.functions}
        # And restrict dependencies to those nodes only
        self._restrict_dependencies_to_known_entities()
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
"""Compute in which relative order the `missing_dependencies` should appear when the nodes are added to the final file that
will be created based on the modular.
"""
relative_order = {}
idx = 0
original_dependencies = []
other_files_dependencies = defaultdict(list)
for dep in sorted(missing_dependencies):
if dep in self.added_objects_file_mapping:
file = self.added_objects_file_mapping[dep]
other_files_dependencies[file].append(dep)
else:
original_dependencies.append(dep)
# Sort all lists according to the order in their respective file
all_dependencies = []
for file, dependencies in other_files_dependencies.items():
sorted_dependencies = sorted(dependencies, key=lambda x: self.start_lines_file_mapping[file][x])
all_dependencies += sorted_dependencies
all_dependencies += sorted(original_dependencies, key=lambda x: self.start_lines[x])
# Add all original node first, then merged ones (one file at a time)
for dep in all_dependencies:
relative_order[dep] = idx
idx += 1
return relative_order
    def infer_new_model_name(self) -> dict:
        """Infer whether we are using a model name prefix different from the usual model name as defined from the filename.
        This is useful e.g. when we define a new multi-modal model, and only the text part inherits from `LlamaModel`,
        so we have something like:
        ```python
        class NewModelNameTextDecoderLayer(LlamaDecoderLayer):
            pass
        ```
        with the `Text` prefix added to the model name.
        However, in case of multiple prefix used, we raise a warning and use the most frequent prefix, to avoid parsing
        the same file multiple times and inconsistencies in the objects added from dependencies.
        If the new prefix collides with a prefix of another class in the file where we are importing from, then we also
        raise a warning, and use the default prefix (model name) to avoid collisions in dependencies.

        Returns a mapping `{imported modeling file -> lowercase name prefix to use for that file}`.
        """
        prefix_model_name_mapping = defaultdict(Counter)
        cased_default_name = get_cased_name(self.model_name)
        # Iterate over all new classes to get modeling super classes
        for class_name, class_node in self.classes.items():
            modeling_bases = [
                k.value.value for k in class_node.bases if k.value.value in self.model_specific_imported_objects
            ]
            if len(modeling_bases) > 1:
                raise ValueError(
                    f"{class_name} was defined with more than 1 model-specific super class. This is unsupported. We found {(*modeling_bases,)}."
                )
            if len(modeling_bases) == 1:
                filename = self.model_specific_imported_objects[modeling_bases[0]]
                cased_model_name = cased_default_name  # the default name prefix
                suffix = common_partial_suffix(class_name, modeling_bases[0])
                # A cased common suffix means the class is `<Prefix><SharedSuffix>` -> strip the suffix to get the prefix
                if len(suffix) > 0 and suffix[0].isupper():
                    cased_model_name = class_name.replace(suffix, "")
                    # If both the old model and new model share the last part of their name, is detected as a common
                    # suffix, but it should not be the case -> use the full name in this case
                    if len(cased_model_name) < len(cased_default_name) and cased_default_name in class_name:
                        cased_model_name = cased_default_name
                prefix_model_name_mapping[filename].update([cased_model_name])
        # Check if we found multiple prefixes for some modeling files
        final_name_mapping = {}
        for file, prefixes_counter in prefix_model_name_mapping.items():
            if len(prefixes_counter) > 1:
                # `total` is the highest usage count among the candidate prefixes for this file
                _, total = prefixes_counter.most_common(1)[0]
                most_used_entities = [name for name, count in prefixes_counter.most_common() if count == total]
                # if the default name is in the pool of equally used prefixes, use it, otherwise last encountered
                final_name = cased_default_name if cased_default_name in most_used_entities else most_used_entities[-1]
            else:
                final_name = list(prefixes_counter)[0]
            # Check if the prefix can be used without collisions in the names
            old_cased_model_name = get_cased_name(file.split(".")[-2])
            old_model_name_prefix = final_name.replace(cased_default_name, old_cased_model_name)
            # Raise adequate warning depending on the situation
            has_prefix_collision = f"\nclass {old_model_name_prefix}" in get_module_source_from_name(file)
            if final_name != cased_default_name and has_prefix_collision:
                if len(prefixes_counter) > 1:
                    logger.warning(
                        f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. However, the "
                        f"most used one, '{final_name}', is already present in the source file and will likely cause consistency "
                        f"issues. For this reason we fallback to the default prefix '{cased_default_name}' when grabbing args "
                        "and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different "
                        f"from '{cased_default_name}') or use a single prefix in all the modular (best)."
                    )
                else:
                    logger.warning(
                        f"We detected the use of the new default prefix {final_name} when inheriting from {file}. However, it is "
                        "already present in the source file and will likely cause consistency issues. For this reason we fallback "
                        f"to the default prefix '{cased_default_name}' when grabbing args and dependencies. Make sure to subclass "
                        f"the intermediate classes with the prefix you want (if different from '{cased_default_name}')"
                    )
                final_name = cased_default_name
            elif len(prefixes_counter) > 1:
                logger.warning(
                    f"We detected multiple prefix names when inheriting from {file}: {(*set(prefixes_counter),)}. We will only "
                    f"use the most used '{final_name}' prefix when grabbing args and dependencies. Make sure to subclass the "
                    f"intermediate classes with the prefix you want (if different from '{final_name}') or use a single prefix "
                    "in all the modular (best)."
                )
            final_name_mapping[file] = get_lowercase_name(final_name)
        # Check we are not missing imported files
        for file in self.model_specific_modules:
            if file not in final_name_mapping:
                # Files never used as a super-class source fall back to the plain model name
                final_name_mapping[file] = self.model_name
        return final_name_mapping
def check_dependencies_and_create_import_node(
    file_type: str, new_dependencies: set[str], mapper: ModuleMapper, new_name: str
) -> tuple[set[str], dict[str, cst.CSTNode]]:
    """Validate that every class node in `new_dependencies` belongs to the file of type `file_type`.

    Any class whose inferred file type differs is removed from the returned dependency set, and an import
    statement pointing at its correct generated file is created instead.

    This situation arises when a new class defined in `modular_xxx.py` does not belong to `type_xxx.py` but is
    referenced (e.g. as a type hint) from `other_type_xxx.py`, and no visited modeling file had a similar class:
    the dependency graph would otherwise inline it in the wrong file. For example, given:
    ```
    from ..llama.modeling_llama import LlamaModel

    class NewNameTextConfig(PreTrainedConfig):
        ...

    class NewNameConfig(PreTrainedConfig):
        ...

    class NewNameModel(LlamaModel):
        config = NewNameConfig()
        text_config = NewNameTextConfig()
        ...
    ```
    without this function `NewNameTextConfig` would end up defined in both `modeling_newname.py` and
    `configuration_newname.py`, because `modeling_llama.py` knows to exclude `NewNameConfig` but has no
    knowledge of `NewNameTextConfig`.

    Returns `(corrected_dependencies, new_imports)` where `new_imports` maps class name -> import statement node.
    """
    corrected_dependencies = new_dependencies.copy()
    new_imports = {}
    for dependency in new_dependencies:
        # Only class definitions can land in the wrong file type; functions/assignments are kept as-is
        if not m.matches(mapper.global_nodes[dependency], m.ClassDef()):
            continue
        target_file_type = find_file_type(dependency, new_name)
        if target_file_type != file_type:
            # The class belongs to another generated file: import it from there instead of inlining it here
            corrected_dependencies.discard(dependency)
            new_imports[dependency] = cst.parse_statement(
                f"from .{target_file_type}_{new_name} import {dependency}"
            )
    return corrected_dependencies, new_imports
def get_class_node_and_dependencies(
    modular_mapper: ModularFileMapper, class_name: str, node: cst.CSTNode, files: dict[str, dict]
) -> tuple[dict, str, dict]:
    """Return a single class node (and all its dependency nodes), to be added to the `files`. It creates the new
    class node based on the inherited classes if needed. Also returns any new imports of a new class defined in
    the modular that we may need.

    Returns `(nodes_to_add, file_type, new_imports)` where `nodes_to_add` maps object name ->
    `(relative_order_index, node)`.
    """
    # An exception was already raised if this has len > 1
    model_specific_bases = [
        k.value.value for k in node.bases if k.value.value in modular_mapper.model_specific_imported_objects
    ]
    super_class = model_specific_bases[0] if len(model_specific_bases) == 1 else None
    file_type = find_file_type(class_name, modular_mapper.model_name)
    file_to_update = files[file_type]
    model_name = modular_mapper.model_name
    # This is used to avoid adding objects to the dependencies graph if they will be imported already
    imported_objects = modular_mapper.imported_objects_per_file[file_type]
    # We need to replace the class node with the transformers (modeling file) super class node
    if super_class is not None:
        super_file_name = modular_mapper.model_specific_imported_objects[super_class]
        # Get the mapper corresponding to the inherited class
        mapper = modular_mapper.visited_modules[super_file_name]
        # Rename the super class according to the exact same rule we used when renaming the whole module
        renamer = modular_mapper.renamers[super_file_name]
        renamed_super_class = preserve_case_replace(super_class, renamer.patterns, renamer.cased_new_name)
        # Create the new class node
        updated_node = replace_class_node(mapper, node, renamed_super_class, super_class)
        # Grab all immediate dependencies of the new node
        new_node_dependencies = augmented_dependencies_for_class_node(updated_node, mapper, imported_objects)
        # At this point, if any class dependency is found, but belongs to another file, it means that we need to remove
        # it from the dependencies, and add a new import of it instead
        new_node_dependencies, new_imports = check_dependencies_and_create_import_node(
            file_type, new_node_dependencies, mapper, model_name
        )
        # Remove all classes explicitly defined in modular from the dependencies. Otherwise, if a class is referenced
        # before its new modular definition, it may be wrongly imported from elsewhere as a dependency if it matches
        # another class from a modeling file after renaming, even though it would be added after anyway (leading to duplicates)
        new_node_dependencies -= set(modular_mapper.classes.keys())
        # The node was modified -> look for all recursive dependencies of the new node
        all_dependencies_to_add = find_all_dependencies(
            dependency_mapping=mapper.class_dependency_mapping,
            initial_dependencies=new_node_dependencies,
            initial_checked_dependencies=set(file_to_update.keys()),
        )
        relative_dependency_order = mapper.compute_relative_order(all_dependencies_to_add)
        nodes_to_add = {
            dep: (relative_dependency_order[dep], mapper.global_nodes[dep]) for dep in all_dependencies_to_add
        }
    # No transformers (modeling file) super class, just check functions and assignments dependencies
    else:
        updated_node = node
        # The node was NOT modified -> no need to look recursively for other class dependencies. Indeed, even if they are not
        # already defined (which would mean a weird order of the code in the modular...), they will be in the future
        all_dependencies_to_add = augmented_dependencies_for_class_node(updated_node, modular_mapper, imported_objects)
        # At this point, if any class dependency is found, but belongs to another file, it means that we need to remove
        # it from the dependencies, and add a new import of it instead
        all_dependencies_to_add, new_imports = check_dependencies_and_create_import_node(
            file_type, all_dependencies_to_add, modular_mapper, model_name
        )
        relative_dependency_order = modular_mapper.compute_relative_order(all_dependencies_to_add)
        nodes_to_add = {
            dep: (relative_dependency_order[dep], modular_mapper.global_nodes[dep])
            for dep in all_dependencies_to_add
            if dep not in file_to_update
        }
    # Add the class node itself to the nodes to add (always last, after all its dependencies)
    class_idx = max(relative_dependency_order.values()) + 1 if len(relative_dependency_order) > 0 else 0
    nodes_to_add[class_name] = (class_idx, updated_node)
    return nodes_to_add, file_type, new_imports
def create_modules(
    modular_mapper: ModularFileMapper,
    file_path: str | None = None,
    package_name: str | None = "transformers",
) -> dict[str, cst.Module]:
    """Create all the new modules based on visiting the modular file. It replaces all classes as necessary.

    Returns a mapping `{file_type -> cst.Module}` ready to be rendered to disk.
    """
    # `files` maps file_type -> {object_name -> {"insert_idx": int, "node": cst node}}
    files = defaultdict(dict)
    current_file_indices = defaultdict(lambda: 0)
    # For each class defined in modular, potentially replace the node and add it with its dependencies
    for class_name, node in modular_mapper.classes.items():
        nodes_to_add, file_type, new_imports = get_class_node_and_dependencies(modular_mapper, class_name, node, files)
        if package_name != "transformers":
            # New imports involve new files like configuration_xxx.py, etc
            # Those are imported with relative imports by default in the modeling file
            # Since relative imports are Transformers imports at this point in the code, convert them to absolute imports from the source library (e.g. optimum-habana)
            for key, new_import in new_imports.items():
                new_imports[key] = new_import.with_changes(
                    body=[
                        convert_relative_import_to_absolute(
                            import_node=new_import.body[0], file_path=file_path, package_name=package_name
                        )
                    ]
                )
        # Add the new potential new imports that we may need to the `modular_mapper` variable
        modular_mapper.imported_objects_per_file[file_type].update(new_imports.keys())
        modular_mapper.imports.extend(list(new_imports.values()))
        # Sort the nodes according to their relative order
        nodes_to_add = sorted(nodes_to_add.items(), key=lambda x: x[1][0])
        # Write all nodes to file
        for dependency, (_, node) in nodes_to_add:
            # This is used to keep certain variables at the beginning of the file
            try:
                # The -1000 is arbitrary -> just keep it bigger than the list
                idx = -1000 + VARIABLES_AT_THE_BEGINNING.index(dependency)
            except ValueError:
                idx = current_file_indices[file_type]
                current_file_indices[file_type] += 1
            files[file_type][dependency] = {"insert_idx": idx, "node": node}
    # Add the __all__ statement to files at the end
    for file_type, node in modular_mapper.all_all_to_add.items():
        idx = current_file_indices[file_type]
        files[file_type]["__all__"] = {"insert_idx": idx, "node": node}
    # Aggregate all the imports statements (we look for duplicates with the code_for_node, not the nodes themselves because
    # they are wrapped in SimpleStatementLine or If which could have different newlines, blanks etc)
    all_imports = modular_mapper.imports.copy()
    all_imports_code = {modular_mapper.python_module.code_for_node(node).strip() for node in all_imports}
    for file, mapper in modular_mapper.visited_modules.items():
        new_imports = [
            node for node in mapper.imports if mapper.python_module.code_for_node(node).strip() not in all_imports_code
        ]
        new_imports_code = {mapper.python_module.code_for_node(node).strip() for node in new_imports}
        all_imports.extend(new_imports)
        all_imports_code.update(new_imports_code)
    # Find the correct imports, and write the new modules
    for file, body in files.items():
        new_body = [k[1]["node"] for k in sorted(body.items(), key=lambda x: x[1]["insert_idx"])]
        needed_imports = get_needed_imports(body, all_imports)
        if package_name != "transformers":
            # Convert all transformers relative imports to absolute ones
            for imp in needed_imports:
                if m.matches(imp, m.SimpleStatementLine(body=[m.ImportFrom()])):
                    # NOTE(review): libcst nodes are immutable frozen dataclasses — verify that this in-place
                    # item assignment on `imp.body` actually takes effect (`with_changes` is the usual API)
                    imp.body[0] = convert_relative_import_to_absolute(
                        import_node=imp.body[0], file_path=file_path, package_name="transformers"
                    )
        full_module = needed_imports + new_body
        new_module = cst.Module(body=full_module, header=modular_mapper.python_module.header)
        files[file] = new_module
    return files
def run_ruff(code, check=False):
    """Run `ruff` over `code` (passed via stdin) and return the processed code.

    Args:
        code (`str`): The source code to lint/format.
        check (`bool`, *optional*, defaults to `False`):
            If `True`, run `ruff check --fix` (lint with autofixes); otherwise run `ruff format`.

    Returns:
        `str`: ruff's stdout, decoded. Best-effort by design: stderr and the exit status are
        deliberately ignored (`--exit-zero` is passed in check mode).
    """
    if check:
        command = ["ruff", "check", "-", "--fix", "--exit-zero"]
    else:
        command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"]
    # `subprocess.run` replaces the manual Popen/communicate dance and guarantees the child is reaped
    result = subprocess.run(command, input=code.encode(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return result.stdout.decode()
def convert_modular_file(modular_file: str, source_library: str | None = "transformers") -> dict[str, str]:
    """Convert a `modular_file` into all the different model-specific files it depicts.

    Returns a mapping `{file_type -> formatted source code}`; an empty dict if the filename does not
    match the `modular_*.py` pattern.
    """
    pattern = re.search(r"modular_(.*)(?=\.py$)", modular_file)
    output = {}
    if pattern is not None:
        model_name = pattern.groups()[0]
        # Parse the Python file
        with open(modular_file, "r", encoding="utf-8") as file:
            code = file.read()
        module = cst.parse_module(code)
        # Get relative path starting from src/transformers/
        if source_library != "transformers":
            # External libraries use the absolute (forward-slash normalized) path instead
            relative_path = os.path.abspath(modular_file).replace("\\", "/")
        else:
            relative_path = re.search(
                r"(src/transformers/.*|examples/.*)", os.path.abspath(modular_file).replace("\\", "/")
            )
            if relative_path is None:
                raise ValueError(
                    f"Cannot find the relative path of {modular_file} inside this `transformers` repository. If this modular file is located in another repository and you would like to generate the modeling file there, use the `--external` flag."
                )
            relative_path = relative_path.group(1)
        # Convert all source library relative imports to absolute ones
        if source_library != "transformers":
            module = module.visit(AbsoluteImportTransformer(relative_path, source_library))
        wrapper = MetadataWrapper(module)
        cst_transformers = ModularFileMapper(module, model_name, source_library)
        wrapper.visit(cst_transformers)
        for file, module in create_modules(
            cst_transformers, file_path=relative_path, package_name=source_library
        ).items():
            if module != {}:
                if source_library != "transformers":
                    # Convert back all absolute imports from the source library to relative ones
                    module = module.visit(RelativeImportTransformer(relative_path, source_library))
                header = AUTO_GENERATED_MESSAGE.format(
                    relative_path=relative_path, short_name=os.path.basename(relative_path)
                )
                # Lint with autofixes first, then format, so the emitted file is stable
                ruffed_code = run_ruff(header + module.code, True)
                formatted_code = run_ruff(ruffed_code, False)
                output[file] = formatted_code
        return output
    else:
        print(f"modular pattern not found in {modular_file}, exiting")
        return {}
def save_modeling_files(modular_file: str, converted_files: dict[str, str]):
    """Write every generated file in `converted_files` next to its `modular_file` source.

    Keys of `converted_files` are file-type patterns (e.g. `"modeling"` or `"modeling.*"` variants): the part
    before `".*"` becomes the filename prefix, and the part after it (if any) is inserted before the `.py`
    extension of the target filename.
    """
    for file_type, code in converted_files.items():
        parts = file_type.split(".*")
        prefix = parts[0]
        suffix = parts[-1] if len(parts) > 1 else ""
        target_path = modular_file.replace("modular_", f"{prefix}_").replace(".py", f"{suffix}.py")
        with open(target_path, "w", encoding="utf-8") as f:
            f.write(code)
def count_loc(file_path: str) -> int:
    """Count the non-blank, non-comment lines of code in `file_path`.

    This is a rough heuristic: everything after a `#` is stripped (including `#` characters inside string
    literals), then blank lines are discarded.
    """
    with open(file_path, "r", encoding="utf-8") as f:
        source = f.read()
    # Strip comments first, then collapse trailing spaces before newlines
    stripped = re.sub(r"#.*", "", source).strip()
    stripped = re.sub(r" *\n", "\n", stripped).strip()
    return sum(1 for line in stripped.split("\n") if line.strip())
def run_converter(modular_file: str, source_library: str | None = "transformers"):
    """Convert a modular file, save the resulting generated files, and report the LoC savings."""
    print(f"Converting {modular_file} to a single model single file format")
    converted_files = convert_modular_file(modular_file, source_library=source_library)
    save_modeling_files(modular_file, converted_files)

    # Compare the size of the modular source against all auto-generated siblings in the same directory
    model_directory = os.path.dirname(modular_file)
    modular_loc = count_loc(modular_file)

    def _is_autogenerated(path: str) -> bool:
        # Generated files all carry the auto-generated banner in their header
        with open(path, "r", encoding="utf-8") as f:
            return "This file was automatically generated from" in f.read()

    candidate_names = [
        name for name in os.listdir(model_directory) if name.endswith(".py") and not name.startswith("modular_")
    ]
    autogenerated_files = [
        os.path.join(model_directory, name)
        for name in candidate_names
        if _is_autogenerated(os.path.join(model_directory, name))
    ]

    if autogenerated_files:
        generated_loc = sum(count_loc(path) for path in autogenerated_files)
        saved = generated_loc - modular_loc
        pct = (saved / generated_loc) * 100
        print(
            f"LoC: {modular_loc} (modular) vs {generated_loc} (generated) - saved {saved} LoC ({pct:.1f}%)"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Same arg as both positional and optional, just for convenience
    parser.add_argument(
        "files",
        nargs="*",
        help="A list of `modular_xxxx` files that should be converted to single model file",
    )
    parser.add_argument(
        "--files-to-parse",
        "--files_to_parse",
        "--files",
        "-f",
        default=["all"],
        nargs="+",
        help="A list of `modular_xxxx` files that should be converted to single model file",
    )
    parser.add_argument(
        "--num_workers",
        "-w",
        default=-1,
        type=int,
        help="The number of workers to use. Default is -1, which means the number of CPU cores.",
    )
    parser.add_argument(
        "--source-library",
        type=str,
        default="transformers",
        help="The top-level package name (default: 'transformers')",
    )
    args = parser.parse_args()
    # Both arg represent the same data, but as positional and optional
    files_to_parse = args.files if len(args.files) > 0 else args.files_to_parse
    num_workers = mp.cpu_count() if args.num_workers == -1 else args.num_workers

    if files_to_parse == ["all"]:
        files_to_parse = glob.glob("src/transformers/models/**/modular_*.py", recursive=True)
    # NOTE(review): this is deliberately `if` (not `elif`) — after the "all" branch above, the `else`
    # below re-checks the globbed paths, which is harmless since they all contain `os.sep`; confirm intended
    if files_to_parse == ["examples"]:
        files_to_parse = glob.glob("examples/**/modular_*.py", recursive=True)
    else:
        # Resolve bare model names to full modular file paths
        for i, model_name in enumerate(files_to_parse):
            if os.sep not in model_name:
                full_path = os.path.join("src", "transformers", "models", model_name, f"modular_{model_name}.py")
                # If it does not exist, try in the examples section
                if not os.path.isfile(full_path):
                    full_path = os.path.join("examples", "modular-transformers", f"modular_{model_name}.py")
                # We did not find it anywhere
                if not os.path.isfile(full_path):
                    raise ValueError(f"Cannot find a modular file for {model_name}. Please provide the full path.")
                files_to_parse[i] = full_path

    # This finds the correct order in which we should convert the modular files, so that a model relying on another one
    # is necessarily converted after its dependencies
    ordered_files, _ = find_priority_list(files_to_parse)
    if sum(len(level_files) for level_files in ordered_files) != len(files_to_parse):
        # Fix: the two implicitly-concatenated literals previously produced "graph.This usually" (missing space)
        raise ValueError(
            "Some files will not be converted because they do not appear in the dependency graph. "
            "This usually means that at least one modular file does not import any model-specific class"
        )

    # Convert one dependency level at a time: files within a level are independent, so parallelize them
    for dependency_level_files in ordered_files:
        # Process files with diff
        workers = min(num_workers, len(dependency_level_files))
        with mp.Pool(workers) as pool:
            pool.map(partial(run_converter, source_library=args.source_library), dependency_level_files)
| ModularFileMapper |
python | ray-project__ray | python/ray/tests/test_network_failure_e2e.py | {
"start": 7792,
"end": 8043
} | class ____:
def __init__(self):
self.count = 0
def inc(self):
self.count = self.count + 1
return self.count
@ray.method(max_task_retries=-1)
def get(self):
return self.count
@ray.remote(num_cpus=0.1, max_task_retries=-1)
| Counter |
python | facebook__pyre-check | tools/generate_taint_models/decorator_parser.py | {
"start": 483,
"end": 633
} | class ____:
name: str
arguments: Final[Optional[Set[str]]] = None
keywords: Final[Optional[Set[Tuple[Optional[str], str]]]] = None
| Decorator |
python | PyCQA__pylint | doc/data/messages/a/abstract-class-instantiated/good.py | {
"start": 101,
"end": 191
} | class ____(Animal):
def make_sound(self):
print("bhaaaaa")
sheep = Sheep()
| Sheep |
python | doocs__leetcode | solution/0400-0499/0403.Frog Jump/Solution.py | {
"start": 0,
"end": 442
} | class ____:
def canCross(self, stones: List[int]) -> bool:
@cache
def dfs(i, k):
if i == n - 1:
return True
for j in range(k - 1, k + 2):
if j > 0 and stones[i] + j in pos and dfs(pos[stones[i] + j], j):
return True
return False
n = len(stones)
pos = {s: i for i, s in enumerate(stones)}
return dfs(0, 0)
| Solution |
python | tox-dev__tox | src/tox/tox_env/python/virtual_env/package/cmd_builder.py | {
"start": 6101,
"end": 6238
} | class ____(VenvCmdBuilder, VirtualEnv):
@staticmethod
def id() -> str:
return "virtualenv-cmd-builder"
| VirtualEnvCmdBuilder |
python | pandas-dev__pandas | pandas/tests/series/methods/test_diff.py | {
"start": 136,
"end": 2714
} | class ____:
def test_diff_series_requires_integer(self):
series = Series(np.random.default_rng(2).standard_normal(2))
with pytest.raises(ValueError, match="periods must be an integer"):
series.diff(1.5)
def test_diff_np(self):
# TODO(__array_function__): could make np.diff return a Series
# matching ser.diff()
ser = Series(np.arange(5))
res = np.diff(ser)
expected = np.array([1, 1, 1, 1])
tm.assert_numpy_array_equal(res, expected)
def test_diff_int(self):
# int dtype
a = 10000000000000000
b = a + 1
ser = Series([a, b])
result = ser.diff()
assert result[1] == 1
def test_diff_tz(self):
# Combined datetime diff, normal diff and boolean diff test
ts = Series(
np.arange(10, dtype=np.float64),
index=date_range("2020-01-01", periods=10),
name="ts",
)
ts.diff()
# neg n
result = ts.diff(-1)
expected = ts - ts.shift(-1)
tm.assert_series_equal(result, expected)
# 0
result = ts.diff(0)
expected = ts - ts
tm.assert_series_equal(result, expected)
def test_diff_dt64(self):
# datetime diff (GH#3100)
ser = Series(date_range("20130102", periods=5))
result = ser.diff()
expected = ser - ser.shift(1)
tm.assert_series_equal(result, expected)
# timedelta diff
result = result - result.shift(1) # previous result
expected = expected.diff() # previously expected
tm.assert_series_equal(result, expected)
def test_diff_dt64tz(self):
# with tz
ser = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern", unit="ns"),
name="foo",
)
result = ser.diff()
expected = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, expected)
def test_diff_bool(self):
# boolean series (test for fixing #17294)
data = [False, True, True, False, False]
output = [np.nan, True, False, True, False]
ser = Series(data)
result = ser.diff()
expected = Series(output)
tm.assert_series_equal(result, expected)
def test_diff_object_dtype(self):
# object series
ser = Series([False, True, 5.0, np.nan, True, False])
result = ser.diff()
expected = ser - ser.shift(1)
tm.assert_series_equal(result, expected)
| TestSeriesDiff |
python | pytorch__pytorch | test/distributed/pipelining/test_schedule.py | {
"start": 38715,
"end": 40603
} | class ____(TestCase):
def test_generate_stage_to_rank_mapping(self):
stage_to_rank = generate_stage_to_rank_mapping(2, 2)
self.assertEqual(
stage_to_rank,
{
0: 0,
1: 1,
},
)
stage_to_rank = generate_stage_to_rank_mapping(2, 4)
self.assertEqual(stage_to_rank, {0: 0, 1: 1, 2: 0, 3: 1})
stage_to_rank = generate_stage_to_rank_mapping(4, 8)
self.assertEqual(
stage_to_rank, {0: 0, 1: 1, 2: 2, 3: 3, 4: 0, 5: 1, 6: 2, 7: 3}
)
stage_to_rank = generate_stage_to_rank_mapping(2, 4, style="v")
self.assertEqual(
stage_to_rank,
{
0: 0,
1: 1,
2: 1,
3: 0,
},
)
stage_to_rank = generate_stage_to_rank_mapping(4, 12, style="v")
self.assertEqual(
stage_to_rank,
{
0: 0,
1: 1,
2: 2,
3: 3,
4: 3,
5: 2,
6: 1,
7: 0,
8: 0,
9: 1,
10: 2,
11: 3,
},
)
stage_to_rank = generate_stage_to_rank_mapping(4, 16, style="v")
self.assertEqual(
stage_to_rank,
{
0: 0,
1: 1,
2: 2,
3: 3,
4: 3,
5: 2,
6: 1,
7: 0,
8: 0,
9: 1,
10: 2,
11: 3,
12: 3,
13: 2,
14: 1,
15: 0,
},
)
instantiate_parametrized_tests(TestScheduleLowering)
if __name__ == "__main__":
run_tests()
| ScheduleUtilTests |
python | getsentry__sentry | src/sentry/features/manager.py | {
"start": 5070,
"end": 17225
} | class ____(RegisteredFeatureManager):
def __init__(self) -> None:
super().__init__()
self._feature_registry: dict[str, type[Feature]] = {}
# Deprecated: Remove entity_features once flagr has been removed.
self.entity_features: set[str] = set()
self.exposed_features: set[str] = set()
self.flagpole_features: set[str] = set()
self._entity_handler: FeatureHandler | None = None
def all(
self, feature_type: type[Feature] = Feature, api_expose_only: bool = False
) -> dict[str, type[Feature]]:
"""
Get a mapping of feature name -> feature class, optionally specific to a
particular feature type.
:param feature_type: The feature class you want to filter by. eg. (OrganizationFeature | ProjectFeature | SystemFeature)
:param api_expose_only: Set to True to only fetch features that were registered with `api_expose`.
"""
return {
name: feature
for name, feature in self._feature_registry.items()
if issubclass(feature, feature_type)
and (not api_expose_only or name in self.exposed_features)
}
def add(
self,
name: str,
cls: type[Feature] = Feature,
entity_feature_strategy: bool | FeatureHandlerStrategy = False,
default: bool = False,
api_expose: bool = False,
) -> None:
"""
Register a feature.
The passed class is a Feature container object, which can be used
to encapsulate the context associated with a feature.
>>> FeatureManager.has('my:feature', actor=request.user)
Features that use flagpole will have an option automatically registered.
"""
entity_feature_strategy = self._shim_feature_strategy(entity_feature_strategy)
if entity_feature_strategy == FeatureHandlerStrategy.FLAGPOLE:
if name.startswith("users:"):
raise NotImplementedError("User flags not allowed with entity_feature=True")
self.entity_features.add(name)
# Register all flagpole features with options automator,
# so long as they haven't already been registered.
if (
entity_feature_strategy == FeatureHandlerStrategy.FLAGPOLE
and name not in self.flagpole_features
):
self.flagpole_features.add(name)
# Set a default of {} to ensure the feature evaluates to None when checked
feature_option_name = f"{FLAGPOLE_OPTION_PREFIX}.{name}"
options.register(
feature_option_name, type=Dict, default={}, flags=options.FLAG_AUTOMATOR_MODIFIABLE
)
if name not in settings.SENTRY_FEATURES:
settings.SENTRY_FEATURES[name] = default
self._feature_registry[name] = cls
if api_expose:
self.exposed_features.add(name)
def _get_feature_class(self, name: str) -> type[Feature]:
try:
return self._feature_registry[name]
except KeyError:
raise FeatureNotRegistered(name)
def get(self, name: str, *args: Any, **kwargs: Any) -> Feature:
"""
Lookup a registered feature context scope given the feature name.
>>> FeatureManager.get('my:feature', actor=request.user)
"""
cls = self._get_feature_class(name)
return cls(name, *args, **kwargs)
def add_entity_handler(self, handler: FeatureHandler) -> None:
"""
Registers a handler that doesn't require a feature name match
"""
self._entity_handler = handler
def has(self, name: str, *args: Any, skip_entity: bool | None = False, **kwargs: Any) -> bool:
"""
Determine if a feature is enabled. If a handler returns None, then the next
mechanism is used for feature checking.
Features are checked in the following order:
1. Execute registered feature handlers. Any
``feature.handler.FeatureHandler`` objects that have been registered
with ``add_handler` will be executed in the order they are declared.
When each handler is executed, should the handler return None
instead of True or False (feature enabled / disabled), the
next registered feature handler will be executed.
2. Check the entity handler, this handler doesn't check the handler registry,
and eventually the entity handler will replace the need to register handlers
for each feature.
TODO: When this replaces registered feature handlers, the functions for
registering and retrieving handlers should all be removed
3. The default configuration of the feature. This can be located in
sentry.conf.server.SENTRY_FEATURES.
Depending on the Feature class, additional arguments may need to be
provided to assign organization or project context to the feature.
>>> FeatureManager.has('organizations:feature', organization, actor=request.user)
"""
sample_rate = 0.01
try:
with metrics.timer("features.has", tags={"feature": name}, sample_rate=sample_rate):
actor = kwargs.pop("actor", None)
feature = self.get(name, *args, **kwargs)
# Check registered feature handlers
rv = self._get_handler(feature, actor)
if rv is not None:
metrics.incr(
"feature.has.result",
tags={"feature": name, "result": rv},
sample_rate=sample_rate,
)
record_feature_flag(name, rv)
return rv
if self._entity_handler and not skip_entity:
rv = self._entity_handler.has(feature, actor)
if rv is not None:
metrics.incr(
"feature.has.result",
tags={"feature": name, "result": rv},
sample_rate=sample_rate,
)
record_feature_flag(name, rv)
return rv
rv = settings.SENTRY_FEATURES.get(feature.name, False)
if rv is not None:
metrics.incr(
"feature.has.result",
tags={"feature": name, "result": rv},
sample_rate=sample_rate,
)
record_feature_flag(name, rv)
return rv
# Features are by default disabled if no plugin or default enables them
metrics.incr(
"feature.has.result",
tags={"feature": name, "result": False},
sample_rate=sample_rate,
)
record_feature_flag(name, False)
return False
except Exception as e:
if in_random_rollout("features.error.capture_rate"):
sentry_sdk.capture_exception(e)
record_feature_flag(name, False)
return False
def batch_has(
self,
feature_names: Sequence[str],
actor: User | RpcUser | AnonymousUser | None = None,
projects: Sequence[Project] | None = None,
organization: Organization | None = None,
) -> dict[str, dict[str, bool | None]] | None:
"""
Determine if multiple features are enabled. Unhandled flags will not be in
the results if they cannot be handled.
Will only accept one type of feature, either all ProjectFeatures or all
OrganizationFeatures.
"""
try:
if self._entity_handler:
with metrics.timer("features.entity_batch_has", sample_rate=0.01):
return self._entity_handler.batch_has(
feature_names, actor, projects=projects, organization=organization
)
else:
# Fall back to default handler if no entity handler available.
project_features = [name for name in feature_names if name.startswith("projects:")]
if projects and project_features:
results: dict[str, dict[str, bool | None]] = {}
for project in projects:
proj_results = results[f"project:{project.id}"] = {}
for feature_name in project_features:
proj_results[feature_name] = self.has(
feature_name, project, actor=actor
)
return results
org_features = filter(lambda name: name.startswith("organizations:"), feature_names)
if organization and org_features:
org_results: dict[str, bool | None] = {}
for feature_name in org_features:
org_results[feature_name] = self.has(
feature_name, organization, actor=actor
)
return {f"organization:{organization.id}": org_results}
unscoped_features = filter(
lambda name: not name.startswith("organizations:")
and not name.startswith("projects:"),
feature_names,
)
if unscoped_features:
unscoped_results: dict[str, bool | None] = {}
for feature_name in unscoped_features:
unscoped_results[feature_name] = self.has(feature_name, actor=actor)
return {"unscoped": unscoped_results}
return None
except Exception as e:
if in_random_rollout("features.error.capture_rate"):
sentry_sdk.capture_exception(e)
return None
def batch_has_for_organizations(
self, feature_name: str, organizations: Sequence[Organization]
) -> dict[str, bool] | None:
"""
Check the same set of feature flags for multiple organizations at once.
This method optimizes the case where you need to check the same features
for many different organizations by delegating to the entity handler if
available, or falling back to individual checks.
Args:
feature_names: List of feature names to check
organizations: List of organizations to check the features for
Returns:
Mapping from organization keys (format: "organization:{id}") to
feature name to result mapping.
"""
try:
if self._entity_handler and hasattr(
self._entity_handler, "batch_has_for_organizations"
):
with metrics.timer("features.batch_has_for_organizations", sample_rate=0.01):
return self._entity_handler.batch_has_for_organizations(
feature_name, organizations
)
else:
results: dict[str, bool] = {}
for organization in organizations:
org_key = f"organization:{organization.id}"
results[org_key] = self.has(feature_name, organization)
return results
except Exception as e:
if in_random_rollout("features.error.capture_rate"):
sentry_sdk.capture_exception(e)
return None
return None
@staticmethod
def _shim_feature_strategy(
entity_feature_strategy: bool | FeatureHandlerStrategy,
) -> FeatureHandlerStrategy:
"""
Shim layer for old API to register a feature until all the features have been converted
"""
if entity_feature_strategy is True:
return FeatureHandlerStrategy.FLAGPOLE
elif entity_feature_strategy is False:
return FeatureHandlerStrategy.INTERNAL
return entity_feature_strategy
| FeatureManager |
python | facebook__pyre-check | client/libcst_vendored_visitors/_gather_global_names.py | {
"start": 397,
"end": 2906
} | class ____(ContextAwareVisitor):
"""
Gathers all globally accessible names defined in a module and stores them as
attributes on the instance.
Intended to be instantiated and passed to a :class:`~libcst.Module`
:meth:`~libcst.CSTNode.visit` method in order to gather up information about
names defined on a module. Note that this is not a substitute for scope
analysis or qualified name support. Please see :ref:`libcst-scope-tutorial`
for a more robust way of determining the qualified name and definition for
an arbitrary node.
Names that are globally accessible through imports are currently not included
but can be retrieved with GatherImportsVisitor.
After visiting a module the following attributes will be populated:
global_names
A sequence of strings representing global variables defined in the module
toplevel.
class_names
A sequence of strings representing classes defined in the module toplevel.
function_names
A sequence of strings representing functions defined in the module toplevel.
"""
def __init__(self, context: CodemodContext) -> None:
super().__init__(context)
self.global_names: Set[str] = set()
self.class_names: Set[str] = set()
self.function_names: Set[str] = set()
# Track scope nesting
self.scope_depth: int = 0
def visit_ClassDef(self, node: libcst.ClassDef) -> None:
if self.scope_depth == 0:
self.class_names.add(node.name.value)
self.scope_depth += 1
def leave_ClassDef(self, original_node: libcst.ClassDef) -> None:
self.scope_depth -= 1
def visit_FunctionDef(self, node: libcst.FunctionDef) -> None:
if self.scope_depth == 0:
self.function_names.add(node.name.value)
self.scope_depth += 1
def leave_FunctionDef(self, original_node: libcst.FunctionDef) -> None:
self.scope_depth -= 1
def visit_Assign(self, node: libcst.Assign) -> None:
if self.scope_depth != 0:
return
for assign_target in node.targets:
target = assign_target.target
if isinstance(target, libcst.Name):
self.global_names.add(target.value)
def visit_AnnAssign(self, node: libcst.AnnAssign) -> None:
if self.scope_depth != 0:
return
target = node.target
if isinstance(target, libcst.Name):
self.global_names.add(target.value)
| GatherGlobalNamesVisitor |
python | qdrant__qdrant-client | qdrant_client/http/api/beta_api.py | {
"start": 2061,
"end": 2474
} | class ____(_BetaApi):
async def clear_issues(
self,
) -> bool:
"""
Removes all issues reported so far
"""
return await self._build_for_clear_issues()
async def get_issues(
self,
) -> object:
"""
Get a report of performance issues and configuration suggestions
"""
return await self._build_for_get_issues()
| AsyncBetaApi |
python | kamyu104__LeetCode-Solutions | Python/brace-expansion-ii.py | {
"start": 163,
"end": 1328
} | class ____(object):
def braceExpansionII(self, expression):
"""
:type expression: str
:rtype: List[str]
"""
def form_words(options):
words = map("".join, itertools.product(*options))
words.sort()
return words
def generate_option(expr, i):
option_set = set()
while i[0] != len(expr) and expr[i[0]] != "}":
i[0] += 1 # { or ,
for option in generate_words(expr, i):
option_set.add(option)
i[0] += 1 # }
option = list(option_set)
option.sort()
return option
def generate_words(expr, i):
options = []
while i[0] != len(expr) and expr[i[0]] not in ",}":
tmp = []
if expr[i[0]] not in "{,}":
tmp.append(expr[i[0]])
i[0] += 1 # a-z
elif expr[i[0]] == "{":
tmp = generate_option(expr, i)
options.append(tmp)
return form_words(options)
return generate_words(expression, [0])
| Solution |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 796,
"end": 2255
} | class ____(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| BaseModelOutput |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF066.py | {
"start": 1185,
"end": 1316
} | class ____(typing.Protocol): # Tests for a Protocol class
@property
def prop1(self) -> int: ... # OK: A stub property
| Proto |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 3378,
"end": 9729
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: ApertusConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[ApertusConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| ApertusRotaryEmbedding |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_os_login.py | {
"start": 5687,
"end": 6694
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = OSLoginHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
with pytest.raises(AirflowException, match=TEST_MESSAGE):
self.hook.import_ssh_public_key(
user=TEST_USER,
ssh_public_key=TEST_BODY,
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestOSLoginHookMissingProjectIdHook |
python | kubernetes-client__python | kubernetes/client/models/v1_container_extended_resource_request.py | {
"start": 383,
"end": 6378
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container_name': 'str',
'request_name': 'str',
'resource_name': 'str'
}
attribute_map = {
'container_name': 'containerName',
'request_name': 'requestName',
'resource_name': 'resourceName'
}
def __init__(self, container_name=None, request_name=None, resource_name=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerExtendedResourceRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._container_name = None
self._request_name = None
self._resource_name = None
self.discriminator = None
self.container_name = container_name
self.request_name = request_name
self.resource_name = resource_name
@property
def container_name(self):
"""Gets the container_name of this V1ContainerExtendedResourceRequest. # noqa: E501
The name of the container requesting resources. # noqa: E501
:return: The container_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:rtype: str
"""
return self._container_name
@container_name.setter
def container_name(self, container_name):
"""Sets the container_name of this V1ContainerExtendedResourceRequest.
The name of the container requesting resources. # noqa: E501
:param container_name: The container_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and container_name is None: # noqa: E501
raise ValueError("Invalid value for `container_name`, must not be `None`") # noqa: E501
self._container_name = container_name
@property
def request_name(self):
"""Gets the request_name of this V1ContainerExtendedResourceRequest. # noqa: E501
The name of the request in the special ResourceClaim which corresponds to the extended resource. # noqa: E501
:return: The request_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:rtype: str
"""
return self._request_name
@request_name.setter
def request_name(self, request_name):
"""Sets the request_name of this V1ContainerExtendedResourceRequest.
The name of the request in the special ResourceClaim which corresponds to the extended resource. # noqa: E501
:param request_name: The request_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and request_name is None: # noqa: E501
raise ValueError("Invalid value for `request_name`, must not be `None`") # noqa: E501
self._request_name = request_name
@property
def resource_name(self):
"""Gets the resource_name of this V1ContainerExtendedResourceRequest. # noqa: E501
The name of the extended resource in that container which gets backed by DRA. # noqa: E501
:return: The resource_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:rtype: str
"""
return self._resource_name
@resource_name.setter
def resource_name(self, resource_name):
"""Sets the resource_name of this V1ContainerExtendedResourceRequest.
The name of the extended resource in that container which gets backed by DRA. # noqa: E501
:param resource_name: The resource_name of this V1ContainerExtendedResourceRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource_name is None: # noqa: E501
raise ValueError("Invalid value for `resource_name`, must not be `None`") # noqa: E501
self._resource_name = resource_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerExtendedResourceRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerExtendedResourceRequest):
return True
return self.to_dict() != other.to_dict()
| V1ContainerExtendedResourceRequest |
python | sympy__sympy | sympy/polys/rootisolation.py | {
"start": 54707,
"end": 64375
} | class ____:
"""A fully qualified representation of a complex isolation interval.
The printed form is shown as (ax, bx) x (ay, by) where (ax, ay)
and (bx, by) are the coordinates of the southwest and northeast
corners of the interval's rectangle, respectively.
Examples
========
>>> from sympy import CRootOf, S
>>> from sympy.abc import x
>>> CRootOf.clear_cache() # for doctest reproducibility
>>> root = CRootOf(x**10 - 2*x + 3, 9)
>>> i = root._get_interval(); i
(3/64, 3/32) x (9/8, 75/64)
The real part of the root lies within the range [0, 3/4] while
the imaginary part lies within the range [9/8, 3/2]:
>>> root.n(3)
0.0766 + 1.14*I
The width of the ranges in the x and y directions on the complex
plane are:
>>> i.dx, i.dy
(3/64, 3/64)
The center of the range is
>>> i.center
(9/128, 147/128)
The northeast coordinate of the rectangle bounding the root in the
complex plane is given by attribute b and the x and y components
are accessed by bx and by:
>>> i.b, i.bx, i.by
((3/32, 75/64), 3/32, 75/64)
The southwest coordinate is similarly given by i.a
>>> i.a, i.ax, i.ay
((3/64, 9/8), 3/64, 9/8)
Although the interval prints to show only the real and imaginary
range of the root, all the information of the underlying root
is contained as properties of the interval.
For example, an interval with a nonpositive imaginary range is
considered to be the conjugate. Since the y values of y are in the
range [0, 1/4] it is not the conjugate:
>>> i.conj
False
The conjugate's interval is
>>> ic = i.conjugate(); ic
(3/64, 3/32) x (-75/64, -9/8)
NOTE: the values printed still represent the x and y range
in which the root -- conjugate, in this case -- is located,
but the underlying a and b values of a root and its conjugate
are the same:
>>> assert i.a == ic.a and i.b == ic.b
What changes are the reported coordinates of the bounding rectangle:
>>> (i.ax, i.ay), (i.bx, i.by)
((3/64, 9/8), (3/32, 75/64))
>>> (ic.ax, ic.ay), (ic.bx, ic.by)
((3/64, -75/64), (3/32, -9/8))
The interval can be refined once:
>>> i # for reference, this is the current interval
(3/64, 3/32) x (9/8, 75/64)
>>> i.refine()
(3/64, 3/32) x (9/8, 147/128)
Several refinement steps can be taken:
>>> i.refine_step(2) # 2 steps
(9/128, 3/32) x (9/8, 147/128)
It is also possible to refine to a given tolerance:
>>> tol = min(i.dx, i.dy)/2
>>> i.refine_size(tol)
(9/128, 21/256) x (9/8, 291/256)
A disjoint interval is one whose bounding rectangle does not
overlap with another. An interval, necessarily, is not disjoint with
itself, but any interval is disjoint with a conjugate since the
conjugate rectangle will always be in the lower half of the complex
plane and the non-conjugate in the upper half:
>>> i.is_disjoint(i), i.is_disjoint(i.conjugate())
(False, True)
The following interval j is not disjoint from i:
>>> close = CRootOf(x**10 - 2*x + 300/S(101), 9)
>>> j = close._get_interval(); j
(75/1616, 75/808) x (225/202, 1875/1616)
>>> i.is_disjoint(j)
False
The two can be made disjoint, however:
>>> newi, newj = i.refine_disjoint(j)
>>> newi
(39/512, 159/2048) x (2325/2048, 4653/4096)
>>> newj
(3975/51712, 2025/25856) x (29325/25856, 117375/103424)
Even though the real ranges overlap, the imaginary do not, so
the roots have been resolved as distinct. Intervals are disjoint
when either the real or imaginary component of the intervals is
distinct. In the case above, the real components have not been
resolved (so we do not know, yet, which root has the smaller real
part) but the imaginary part of ``close`` is larger than ``root``:
>>> close.n(3)
0.0771 + 1.13*I
>>> root.n(3)
0.0766 + 1.14*I
"""
def __init__(self, a, b, I, Q, F1, F2, f1, f2, dom, conj=False):
"""Initialize new complex interval with complete information. """
# a and b are the SW and NE corner of the bounding interval,
# (ax, ay) and (bx, by), respectively, for the NON-CONJUGATE
# root (the one with the positive imaginary part); when working
# with the conjugate, the a and b value are still non-negative
# but the ay, by are reversed and have oppositite sign
self.a, self.b = a, b
self.I, self.Q = I, Q
self.f1, self.F1 = f1, F1
self.f2, self.F2 = f2, F2
self.dom = dom
self.conj = conj
@property
def func(self):
return ComplexInterval
@property
def args(self):
i = self
return (i.a, i.b, i.I, i.Q, i.F1, i.F2, i.f1, i.f2, i.dom, i.conj)
def __eq__(self, other):
if type(other) is not type(self):
return False
return self.args == other.args
@property
def ax(self):
"""Return ``x`` coordinate of south-western corner. """
return self.a[0]
@property
def ay(self):
"""Return ``y`` coordinate of south-western corner. """
if not self.conj:
return self.a[1]
else:
return -self.b[1]
@property
def bx(self):
"""Return ``x`` coordinate of north-eastern corner. """
return self.b[0]
@property
def by(self):
"""Return ``y`` coordinate of north-eastern corner. """
if not self.conj:
return self.b[1]
else:
return -self.a[1]
@property
def dx(self):
"""Return width of the complex isolating interval. """
return self.b[0] - self.a[0]
@property
def dy(self):
"""Return height of the complex isolating interval. """
return self.b[1] - self.a[1]
@property
def center(self):
"""Return the center of the complex isolating interval. """
return ((self.ax + self.bx)/2, (self.ay + self.by)/2)
@property
def max_denom(self):
"""Return the largest denominator occurring in either endpoint. """
return max(self.ax.denominator, self.bx.denominator,
self.ay.denominator, self.by.denominator)
def as_tuple(self):
"""Return tuple representation of the complex isolating
interval's SW and NE corners, respectively. """
return ((self.ax, self.ay), (self.bx, self.by))
def __repr__(self):
return "(%s, %s) x (%s, %s)" % (self.ax, self.bx, self.ay, self.by)
def conjugate(self):
"""This complex interval really is located in lower half-plane. """
return ComplexInterval(self.a, self.b, self.I, self.Q,
self.F1, self.F2, self.f1, self.f2, self.dom, conj=True)
def __contains__(self, item):
"""
Say whether a complex number belongs to this complex rectangular
region.
Parameters
==========
item : pair (re, im) or number re
Either a pair giving the real and imaginary parts of the number,
or else a real number.
"""
if isinstance(item, tuple):
re, im = item
else:
re, im = item, 0
return self.ax <= re <= self.bx and self.ay <= im <= self.by
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if isinstance(other, RealInterval):
return other.is_disjoint(self)
if self.conj != other.conj: # above and below real axis
return True
re_distinct = (self.bx < other.ax or other.bx < self.ax)
if re_distinct:
return True
im_distinct = (self.by < other.ay or other.by < self.ay)
return im_distinct
def _inner_refine(self):
"""Internal one step complex root refinement procedure. """
(u, v), (s, t) = self.a, self.b
I, Q = self.I, self.Q
f1, F1 = self.f1, self.F1
f2, F2 = self.f2, self.F2
dom = self.dom
if s - u > t - v:
D_L, D_R = _vertical_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_L[0] == 1:
_, a, b, I, Q, F1, F2 = D_L
else:
_, a, b, I, Q, F1, F2 = D_R
else:
D_B, D_U = _horizontal_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_B[0] == 1:
_, a, b, I, Q, F1, F2 = D_B
else:
_, a, b, I, Q, F1, F2 = D_U
return ComplexInterval(a, b, I, Q, F1, F2, f1, f2, dom, self.conj)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx, dy=None):
"""Refine an isolating interval until it is of sufficiently small size. """
if dy is None:
dy = dx
expr = self
while not (expr.dx < dx and expr.dy < dy):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of complex root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of complex root refinement algorithm. """
return self._inner_refine()
| ComplexInterval |
python | fabric__fabric | tests/main.py | {
"start": 1114,
"end": 14540
} | class ____:
class core_program_behavior:
def version_output_contains_our_name_plus_deps(self):
expect(
"--version",
r"""
Fabric .+
Paramiko .+
Invoke .+
""".strip(),
test="regex",
)
def help_output_says_fab(self):
expect("--help", "Usage: fab", test="contains")
def exposes_hosts_flag_in_help(self):
expect("--help", "-H STRING, --hosts=STRING", test="contains")
def executes_remainder_as_anonymous_task(self, remote):
remote.expect(host="myhost", cmd="whoami")
make_program().run("fab -H myhost -- whoami", exit=False)
@patch("paramiko.agent.Agent.get_keys")
def can_list_agent_keys(self, get_keys):
agent_keys = []
for type_, bits, comment in (
("ecdsa", b"dummy", "woody"),
("rsa", b"ventriloquist", "bob"),
("ed25519", b"stagehand", "smith"),
):
# Looks like a pubkey blob from an agent
m = Message()
m.add_string(type_)
m.add_string(bits)
agent_keys.append(
AgentKey(agent=None, blob=bytes(m), comment=comment)
)
get_keys.return_value = agent_keys
expected = """
0 SHA256:r7SOU1pAlEWmRE57Swf0OQHg9tlYicKaLx2DxGbDVk8 woody (ECDSA)
0 SHA256:2qZYGN+eIVfmhwpQUMje7uG4+7tZquM5LBwNaHCBsqg bob (RSA)
0 SHA256:4seJT+aN1aTPIudGupnXsZ1z20r+GCIAAKEA4MHnwvA smith (ED25519)
""".lstrip()
expect("--list-agent-keys", expected)
def uses_FABRIC_env_prefix(self, environ):
environ["FABRIC_RUN_ECHO"] = "1"
with cd(support):
make_program().run("fab expect-from-env")
def basic_pre_and_post_tasks_still_work(self):
with cd(support):
# Safety
expect("first", "First!\n")
expect("third", "Third!\n")
# Real test
expect("second", "First!\nSecond!\nThird!\n")
class filenames:
def loads_fabfile_not_tasks(self):
"Loads fabfile.py, not tasks.py"
with cd(support):
expect(
"--list",
"""
Available tasks:
basic-run
build
deploy
expect-connect-timeout
expect-from-env
expect-identities
expect-identity
expect-mutation
expect-mutation-to-fail
expect-vanilla-Context
first
hosts-are-host-stringlike
hosts-are-init-kwargs
hosts-are-mixed-values
hosts-are-myhost
mutate
second
third
two-hosts
vanilla-Task-works-ok
""".lstrip(),
)
def loads_fabric_config_files_not_invoke_ones(self):
for type_ in ("yaml", "yml", "json", "py"):
with cd(os.path.join(support, "{}_conf".format(type_))):
# This task, in each subdir, expects data present in a
# fabric.<ext> nearby to show up in the config.
make_program().run("fab expect-conf-value")
class runtime_ssh_config_path:
def _run(
self,
flag="-S",
file_="ssh_config/runtime.conf",
tasks="runtime-ssh-config",
):
with cd(support):
# Relies on asserts within the task, which will bubble up as
# it's executed in-process
cmd = "fab -c runtime_fabfile {} {} -H runtime {}"
make_program().run(cmd.format(flag, file_, tasks))
def capital_F_flag_specifies_runtime_ssh_config_file(self):
self._run(flag="-S")
def long_form_flag_also_works(self):
self._run(flag="--ssh-config")
@raises(IOError)
def IOErrors_if_given_missing_file(self):
self._run(file_="nope/nothere.conf")
@patch.object(Config, "_load_ssh_file")
def config_only_loaded_once_per_session(self, method):
# Task that doesn't make assertions about the config (since the
# _actual_ config it gets is empty as we had to mock out the loader
# method...sigh)
self._run(tasks="dummy dummy")
# Called only once (initial __init__) with runtime conf, instead of
# that plus a few more pairs of calls against the default files
# (which is what happens when clone() isn't preserving the
# already-parsed/loaded SSHConfig)
method.assert_called_once_with("ssh_config/runtime.conf")
class hosts_flag_parameterizes_tasks:
# NOTE: many of these just rely on MockRemote's builtin
# "channel.exec_command called with given command string" asserts.
def single_string_is_single_host_and_single_exec(self, remote):
remote.expect(host="myhost", cmd="nope")
# In addition to just testing a base case, this checks for a really
# dumb bug where one appends to, instead of replacing, the task
# list during parameterization/expansion XD
with cd(support):
make_program().run("fab -H myhost basic-run")
def comma_separated_string_is_multiple_hosts(self, remote):
remote.expect_sessions(
Session("host1", cmd="nope"), Session("host2", cmd="nope")
)
with cd(support):
make_program().run("fab -H host1,host2 basic-run")
def multiple_hosts_works_with_remainder_too(self, remote):
remote.expect_sessions(
Session("host1", cmd="whoami"), Session("host2", cmd="whoami")
)
make_program().run("fab -H host1,host2 -- whoami")
def host_string_shorthand_is_passed_through(self, remote):
remote.expect(host="host1", port=1234, user="someuser")
make_program().run("fab -H someuser@host1:1234 -- whoami")
# NOTE: no mocking because no actual run() under test, only
# parameterization
# TODO: avoiding for now because implementing this requires more work
# at the Invoke level re: deciding when to _not_ pass in the
# session-global config object (Executor's self.config). At the moment,
# our threading-concurrency API is oriented around Group, and we're not
# using it for --hosts, so it's not broken...yet.
@pytest.mark.skip
def config_mutation_not_preserved(self):
with cd(support):
make_program().run(
"fab -H host1,host2 expect-mutation-to-fail"
)
@trap
def pre_post_tasks_are_not_parameterized_across_hosts(self):
with cd(support):
make_program().run(
"fab -H hostA,hostB,hostC second --show-host"
)
output = sys.stdout.getvalue()
# Expect pre once, 3x main, post once, as opposed to e.g. both
# pre and main task
expected = """
First!
Second: hostA
Second: hostB
Second: hostC
Third!
""".lstrip()
assert output == expected
class hosts_task_arg_parameterizes_tasks:
# NOTE: many of these just rely on MockRemote's builtin
# "channel.exec_command called with given command string" asserts.
def single_string_is_single_exec(self, remote):
remote.expect(host="myhost", cmd="nope")
with cd(support):
make_program().run("fab hosts-are-myhost")
def multiple_strings_is_multiple_host_args(self, remote):
remote.expect_sessions(
Session("host1", cmd="nope"), Session("host2", cmd="nope")
)
with cd(support):
make_program().run("fab two-hosts")
def host_string_shorthand_works_ok(self, remote):
remote.expect(host="host1", port=1234, user="someuser")
with cd(support):
make_program().run("fab hosts-are-host-stringlike")
def may_give_Connection_init_kwarg_dicts(self, remote):
remote.expect_sessions(
Session("host1", user="admin", cmd="nope"),
Session("host2", cmd="nope"),
)
with cd(support):
make_program().run("fab hosts-are-init-kwargs")
def may_give_mixed_value_types(self, remote):
remote.expect_sessions(
Session("host1", user="admin", cmd="nope"),
Session("host2", cmd="nope"),
)
with cd(support):
make_program().run("fab hosts-are-mixed-values")
class no_hosts_flag_or_task_arg:
def calls_task_once_with_invoke_context(self):
with cd(support):
make_program().run("fab expect-vanilla-Context")
def vanilla_Invoke_task_works_too(self):
with cd(support):
make_program().run("fab vanilla-Task-works-ok")
@raises(NothingToDo)
def generates_exception_if_combined_with_remainder(self):
make_program().run("fab -- nope")
def invokelike_multitask_invocation_preserves_config_mutation(self):
# Mostly a guard against Executor subclass tweaks breaking Invoke
# behavior added in pyinvoke/invoke#309
with cd(support):
make_program().run("fab mutate expect-mutation")
class connect_timeout:
def dash_t_supplies_default_connect_timeout(self):
with cd(support):
make_program().run("fab -t 5 expect-connect-timeout")
def double_dash_connect_timeout_also_works(self):
with cd(support):
make_program().run(
"fab --connect-timeout 5 expect-connect-timeout"
)
class runtime_identity_file:
def dash_i_supplies_default_connect_kwarg_key_filename(self):
# NOTE: the expect-identity task in tests/_support/fabfile.py
# performs asserts about its context's .connect_kwargs value,
# relying on other tests to prove connect_kwargs makes its way into
# that context.
with cd(support):
make_program().run("fab -i identity.key expect-identity")
def double_dash_identity_also_works(self):
with cd(support):
make_program().run(
"fab --identity identity.key expect-identity"
)
def may_be_given_multiple_times(self):
with cd(support):
make_program().run(
"fab -i identity.key -i identity2.key expect-identities"
)
class secrets_prompts:
@patch("fabric.main.getpass.getpass")
def _expect_prompt(self, getpass, flag, key, value, prompt):
getpass.return_value = value
with cd(support):
# Expect that the given key was found in the context.
cmd = "fab -c prompting {} expect-connect-kwarg --key {} --val {}" # noqa
make_program().run(cmd.format(flag, key, value))
# Then we also expect that getpass was called w/ expected prompt
getpass.assert_called_once_with(prompt)
def password_prompt_updates_connect_kwargs(self):
self._expect_prompt(
flag="--prompt-for-login-password",
key="password",
value="mypassword",
prompt="Enter login password for use with SSH auth: ",
)
def passphrase_prompt_updates_connect_kwargs(self):
self._expect_prompt(
flag="--prompt-for-passphrase",
key="passphrase",
value="mypassphrase",
prompt="Enter passphrase for use unlocking SSH keys: ",
)
class configuration_updating_and_merging:
def key_filename_can_be_set_via_non_override_config_levels(self):
# Proves/protects against #1762, where eg key_filenames gets
# 'reset' to an empty list. Arbitrarily uses the 'yml' level of
# test fixtures, which has a fabric.yml w/ a
# connect_kwargs.key_filename value of [private.key, other.key].
with cd(os.path.join(support, "yml_conf")):
make_program().run("fab expect-conf-key-filename")
def cli_identity_still_overrides_when_non_empty(self):
with cd(os.path.join(support, "yml_conf")):
make_program().run("fab -i cli.key expect-cli-key-filename")
class completion:
# NOTE: most completion tests are in Invoke too; this is just an
# irritating corner case driven by Fabric's 'remainder' functionality.
@trap
def complete_flag_does_not_trigger_remainder_only_behavior(self):
# When bug present, 'fab --complete -- fab' fails to load any
# collections because it thinks it's in remainder-only,
# work-without-a-collection mode.
with cd(support):
make_program().run("fab --complete -- fab", exit=False)
# Cherry-picked safety checks looking for tasks from fixture
# fabfile
output = sys.stdout.getvalue()
for name in ("build", "deploy", "expect-from-env"):
assert name in output
| Fab_ |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 448,
"end": 1463
} | class ____(nodes.Node):
"""Node which supports translation.
The translation goes forward with following steps:
1. Preserve original translatable messages
2. Apply translated messages from message catalog
3. Extract preserved messages (for gettext builder)
The translatable nodes MUST preserve original messages.
And these messages should not be overridden at applying step.
Because they are used at final step; extraction.
"""
def preserve_original_messages(self) -> None:
"""Preserve original translatable messages."""
raise NotImplementedError
def apply_translated_message(
self, original_message: str, translated_message: str
) -> None:
"""Apply translated message."""
raise NotImplementedError
def extract_original_messages(self) -> Sequence[str]:
"""Extract translation messages.
:returns: list of extracted messages or messages generator
"""
raise NotImplementedError
| translatable |
python | huggingface__transformers | tests/models/video_llama_3/test_modeling_video_llama_3.py | {
"start": 21871,
"end": 25854
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
image_size=14,
is_training=True,
text_config={
"attention_dropout": 0.0,
"bos_token_id": 0,
"eos_token_id": 1,
"pad_token_id": 2,
"hidden_act": "silu",
"hidden_size": 32,
"intermediate_size": 37,
"max_position_embeddings": 512,
"max_window_layers": 3,
"model_type": "qwen2",
"num_attention_heads": 4,
"num_hidden_layers": 2,
"num_key_value_heads": 2,
"rms_norm_eps": 1e-06,
"rope_scaling": None,
"rope_theta": 1000000.0,
"sliding_window": None,
"tie_word_embeddings": True,
"vocab_size": 99,
},
vision_config={
"attention_dropout": 0.0,
"hidden_act": "gelu_pytorch_tanh",
"hidden_size": 32,
"intermediate_size": 64,
"layer_norm_eps": 1e-06,
"model_type": "video_llama_3_vision",
"num_attention_heads": 4,
"num_channels": 3,
"num_hidden_layers": 2,
"patch_size": 14,
},
use_token_compression=True,
image_token_id=3,
video_token_id=4,
):
self.parent = parent
self.hidden_size = text_config["hidden_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.patch_size = vision_config["patch_size"]
self.batch_size = batch_size
self.seq_length = seq_length
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.text_config = text_config
self.vision_config = vision_config
self.use_token_compression = use_token_compression
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.num_image_tokens = 32
self.seq_length = seq_length + self.num_image_tokens
def get_config(self):
return VideoLlama3Config(
text_config=self.text_config,
vision_config=self.vision_config,
use_token_compression=self.use_token_compression,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2),
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[:, -1] = config.text_config.pad_token_id
attention_mask[:, -1] = 0
input_ids[input_ids == self.video_token_id] = config.text_config.pad_token_id
input_ids[input_ids == self.image_token_id] = config.text_config.pad_token_id
input_ids[:, self.num_image_tokens] = self.image_token_id
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device),
"image_merge_sizes": torch.tensor([1] * self.batch_size, device=torch_device),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| VideoLlama3VisionText2TextModelTester |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 981507,
"end": 991244
} | class ____(VegaLiteSchema):
"""
ProjectionConfig schema wrapper.
Parameters
----------
center : dict, Sequence[float], :class:`ExprRef`, :class:`Vector2number`
The projection's center, a two-element array of longitude and latitude in degrees.
**Default value:** ``[0, 0]``
clipAngle : dict, float, :class:`ExprRef`
The projection's clipping circle radius to the specified angle in degrees. If
``null``, switches to `antimeridian <http://bl.ocks.org/mbostock/3788999>`__ cutting
rather than small-circle clipping.
clipExtent : dict, :class:`ExprRef`, :class:`Vector2Vector2number`, Sequence[Sequence[float], :class:`Vector2number`]
The projection's viewport clip extent to the specified bounds in pixels. The extent
bounds are specified as an array ``[[x0, y0], [x1, y1]]``, where ``x0`` is the
left-side of the viewport, ``y0`` is the top, ``x1`` is the right and ``y1`` is the
bottom. If ``null``, no viewport clipping is performed.
coefficient : dict, float, :class:`ExprRef`
The coefficient parameter for the ``hammer`` projection.
**Default value:** ``2``
distance : dict, float, :class:`ExprRef`
For the ``satellite`` projection, the distance from the center of the sphere to the
point of view, as a proportion of the sphere's radius. The recommended maximum clip
angle for a given ``distance`` is acos(1 / distance) converted to degrees. If tilt
is also applied, then more conservative clipping may be necessary.
**Default value:** ``2.0``
extent : dict, :class:`ExprRef`, :class:`Vector2Vector2number`, Sequence[Sequence[float], :class:`Vector2number`]
fit : dict, :class:`Fit`, :class:`ExprRef`, :class:`GeoJsonFeature`, :class:`GeoJsonFeatureCollection`, Sequence[dict, :class:`GeoJsonFeature`], Sequence[dict, :class:`Fit`, :class:`GeoJsonFeature`, :class:`GeoJsonFeatureCollection`, Sequence[dict, :class:`GeoJsonFeature`]]
fraction : dict, float, :class:`ExprRef`
The fraction parameter for the ``bottomley`` projection.
**Default value:** ``0.5``, corresponding to a sin(ψ) where ψ = π/6.
lobes : dict, float, :class:`ExprRef`
The number of lobes in projections that support multi-lobe views: ``berghaus``,
``gingery``, or ``healpix``. The default value varies based on the projection type.
parallel : dict, float, :class:`ExprRef`
The parallel parameter for projections that support it: ``armadillo``, ``bonne``,
``craig``, ``cylindricalEqualArea``, ``cylindricalStereographic``,
``hammerRetroazimuthal``, ``loximuthal``, or ``rectangularPolyconic``. The default
value varies based on the projection type.
parallels : dict, Sequence[float], :class:`ExprRef`
For conic projections, the `two standard parallels
<https://en.wikipedia.org/wiki/Map_projection#Conic>`__ that define the map layout.
The default depends on the specific conic projection used.
pointRadius : dict, float, :class:`ExprRef`
The default radius (in pixels) to use when drawing GeoJSON ``Point`` and
``MultiPoint`` geometries. This parameter sets a constant default value. To modify
the point radius in response to data, see the corresponding parameter of the GeoPath
and GeoShape transforms.
**Default value:** ``4.5``
precision : dict, float, :class:`ExprRef`
The threshold for the projection's `adaptive resampling
<http://bl.ocks.org/mbostock/3795544>`__ to the specified value in pixels. This
value corresponds to the `Douglas-Peucker distance
<http://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm>`__.
If precision is not specified, returns the projection's current resampling precision
which defaults to ``√0.5 ≅ 0.70710…``.
radius : dict, float, :class:`ExprRef`
The radius parameter for the ``airy`` or ``gingery`` projection. The default value
varies based on the projection type.
ratio : dict, float, :class:`ExprRef`
The ratio parameter for the ``hill``, ``hufnagel``, or ``wagner`` projections. The
default value varies based on the projection type.
reflectX : bool, dict, :class:`ExprRef`
Sets whether or not the x-dimension is reflected (negated) in the output.
reflectY : bool, dict, :class:`ExprRef`
Sets whether or not the y-dimension is reflected (negated) in the output.
rotate : dict, Sequence[float], :class:`ExprRef`, :class:`Vector2number`, :class:`Vector3number`
The projection's three-axis rotation to the specified angles, which must be a two-
or three-element array of numbers [``lambda``, ``phi``, ``gamma``] specifying the
rotation angles in degrees about each spherical axis. (These correspond to yaw,
pitch and roll.)
**Default value:** ``[0, 0, 0]``
scale : dict, float, :class:`ExprRef`
The projection's scale (zoom) factor, overriding automatic fitting. The default
scale is projection-specific. The scale factor corresponds linearly to the distance
between projected points; however, scale factor values are not equivalent across
projections.
size : dict, Sequence[float], :class:`ExprRef`, :class:`Vector2number`
Used in conjunction with fit, provides the width and height in pixels of the area to
which the projection should be automatically fit.
spacing : dict, float, :class:`ExprRef`
The spacing parameter for the ``lagrange`` projection.
**Default value:** ``0.5``
tilt : dict, float, :class:`ExprRef`
The tilt angle (in degrees) for the ``satellite`` projection.
**Default value:** ``0``.
translate : dict, Sequence[float], :class:`ExprRef`, :class:`Vector2number`
The projection's translation offset as a two-element array ``[tx, ty]``.
type : dict, :class:`ExprRef`, :class:`ProjectionType`, Literal['albers', 'albersUsa', 'azimuthalEqualArea', 'azimuthalEquidistant', 'conicConformal', 'conicEqualArea', 'conicEquidistant', 'equalEarth', 'equirectangular', 'gnomonic', 'identity', 'mercator', 'naturalEarth1', 'orthographic', 'stereographic', 'transverseMercator']
The cartographic projection to use. This value is case-insensitive, for example
``"albers"`` and ``"Albers"`` indicate the same projection type. You can find all
valid projection types `in the documentation
<https://vega.github.io/vega-lite/docs/projection.html#projection-types>`__.
**Default value:** ``equalEarth``
"""
_schema = {"$ref": "#/definitions/ProjectionConfig"}
def __init__(
self,
center: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
clipAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
clipExtent: Optional[
Parameter | SchemaBase | Sequence[SchemaBase | Sequence[float]] | Map
] = Undefined,
coefficient: Optional[float | Parameter | SchemaBase | Map] = Undefined,
distance: Optional[float | Parameter | SchemaBase | Map] = Undefined,
extent: Optional[
Parameter | SchemaBase | Sequence[SchemaBase | Sequence[float]] | Map
] = Undefined,
fit: Optional[
Parameter
| SchemaBase
| Sequence[SchemaBase | Map]
| Sequence[SchemaBase | Sequence[SchemaBase | Map] | Map]
| Map
] = Undefined,
fraction: Optional[float | Parameter | SchemaBase | Map] = Undefined,
lobes: Optional[float | Parameter | SchemaBase | Map] = Undefined,
parallel: Optional[float | Parameter | SchemaBase | Map] = Undefined,
parallels: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
pointRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
precision: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ratio: Optional[float | Parameter | SchemaBase | Map] = Undefined,
reflectX: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
reflectY: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
rotate: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
scale: Optional[float | Parameter | SchemaBase | Map] = Undefined,
size: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
spacing: Optional[float | Parameter | SchemaBase | Map] = Undefined,
tilt: Optional[float | Parameter | SchemaBase | Map] = Undefined,
translate: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
type: Optional[Parameter | SchemaBase | Map | ProjectionType_T] = Undefined,
**kwds,
):
super().__init__(
center=center,
clipAngle=clipAngle,
clipExtent=clipExtent,
coefficient=coefficient,
distance=distance,
extent=extent,
fit=fit,
fraction=fraction,
lobes=lobes,
parallel=parallel,
parallels=parallels,
pointRadius=pointRadius,
precision=precision,
radius=radius,
ratio=ratio,
reflectX=reflectX,
reflectY=reflectY,
rotate=rotate,
scale=scale,
size=size,
spacing=spacing,
tilt=tilt,
translate=translate,
type=type,
**kwds,
)
| ProjectionConfig |
python | celery__celery | t/unit/utils/test_platforms.py | {
"start": 13060,
"end": 14454
} | class ____:
def test_without_resource(self):
prev, platforms.resource = platforms.resource, None
try:
with pytest.raises(RuntimeError):
detached()
finally:
platforms.resource = prev
@patch('celery.platforms._create_pidlock')
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
@patch('builtins.open')
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
context = detached(uid='user', gid='group')
assert isinstance(context, DaemonContext)
signals.reset.assert_called_with('SIGCLD')
maybe_drop.assert_called_with(uid='user', gid='group')
open.return_value = Mock()
geteuid.return_value = 5001
context = detached(uid='user', gid='group', logfile='/foo/bar')
assert isinstance(context, DaemonContext)
assert context.after_chdir
context.after_chdir()
open.assert_called_with('/foo/bar', 'a')
open.return_value.close.assert_called_with()
context = detached(pidfile='/foo/bar/pid')
assert isinstance(context, DaemonContext)
assert context.after_chdir
context.after_chdir()
pidlock.assert_called_with('/foo/bar/pid')
@t.skip.if_win32
| test_detached |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 34225,
"end": 38175
} | class ____(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label
classes. We expect labels to be provided in a `one_hot` representation. If
you want to provide labels as integers, please use
`SparseCategoricalCrossentropy` loss. There should be `num_classes` floating
point values per feature, i.e., the shape of both `y_pred` and `y_true` are
`[batch_size, num_classes]`.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values are relaxed. For example, if
`0.1`, use `0.1 / num_classes` for non-target labels and
`0.9 + 0.1 / num_classes` for target labels.
axis: The axis along which to compute crossentropy (the features
axis). Defaults to `-1`.
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Examples:
Standalone usage:
>>> y_true = np.array([[0, 1, 0], [0, 0, 1]])
>>> y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> cce = keras.losses.CategoricalCrossentropy()
>>> cce(y_true, y_pred)
1.177
>>> # Calling with 'sample_weight'.
>>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
0.814
>>> # Using 'sum' reduction type.
>>> cce = keras.losses.CategoricalCrossentropy(
... reduction="sum")
>>> cce(y_true, y_pred)
2.354
>>> # Using 'none' reduction type.
>>> cce = keras.losses.CategoricalCrossentropy(
... reduction=None)
>>> cce(y_true, y_pred)
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=keras.losses.CategoricalCrossentropy())
```
"""
def __init__(
self,
from_logits=False,
label_smoothing=0.0,
axis=-1,
reduction="sum_over_batch_size",
name="categorical_crossentropy",
dtype=None,
):
super().__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
dtype=dtype,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis,
)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
self.axis = axis
def get_config(self):
config = Loss.get_config(self)
config.update(
{
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
"axis": self.axis,
}
)
return config
@keras_export("keras.losses.CategoricalFocalCrossentropy")
| CategoricalCrossentropy |
python | pytorch__pytorch | torch/ao/quantization/fx/_equalize.py | {
"start": 4931,
"end": 8701
} | class ____(nn.Module):
r"""Observer for tracking the running min/max values of weight columns and
rows, and computing the quantization parameters for the weight rows.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
This observer is made up of 1 PerChannelMinMaxObserver `weight_col_obs` used
to record the running minimum and maximum of columns of incoming weight
tensors. This observer is intended to be used along with an
InputEqualizationObserver to calculate the equalization scale.
The running minimum/maximum :math:`w_\text{min/max}` are computed in the
same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`.
"""
def __init__(
self,
dtype=torch.qint8,
qscheme=torch.per_tensor_affine,
quant_min=None,
quant_max=None,
factory_kwargs=None,
) -> None:
super().__init__()
self.dtype = dtype
self.qscheme = qscheme
self.ch_axis = 1
per_channel_qscheme = qscheme
if qscheme in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
per_channel_qscheme = qsheme_mapping_per_tensor_to_per_channel[qscheme]
self.weight_col_obs = PerChannelMinMaxObserver(
ch_axis=1,
dtype=dtype,
qscheme=per_channel_qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
)
self.equalization_scale = torch.tensor(1)
def forward(self, w_orig):
if w_orig.ndim < 2 or w_orig.ndim > 5:
raise ValueError(
"InputEqualizationObserver only supports Linear and Conv layers"
)
return self.weight_col_obs(w_orig)
def get_weight_col_minmax(self):
return (self.weight_col_obs.min_val, self.weight_col_obs.max_val)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
with_args = classmethod(_with_args)
def calculate_equalization_scale(
input_obs: _InputEqualizationObserver, weight_obs: _WeightEqualizationObserver
) -> torch.Tensor:
r"""Calculates the equalization scale and sets the equalization_scale value
in the observers.
Args:
input_obs: Observer that tracks the ranges for the input columns
weight_obs: Observer that tracks the ranges for the weight columns
"""
(min_inputs, max_inputs) = input_obs.get_input_minmax()
(min_weights, max_weights) = weight_obs.get_weight_col_minmax()
if not (
check_min_max_valid(min_inputs, max_inputs)
and check_min_max_valid(min_weights, max_weights)
):
warnings.warn(
"Must run observer before calling calculate_equalization_scale. "
+ "Returning default equalization scale torch.tensor(1).",
stacklevel=2,
)
return torch.tensor(1)
if min_inputs.shape != min_weights.shape:
raise ValueError(
"Input and Weight must have the same column dimension. "
+ f"Found {min_inputs.shape} and {min_weights.shape} shapes instead."
)
equalization_scale = torch.sqrt(
(max_weights - min_weights) / (max_inputs - min_inputs)
)
# Replace all 'inf', 'nan', 0's with 1s to prevent errors
equalization_scale[equalization_scale == 0.0] = 1
equalization_scale = torch.nan_to_num(equalization_scale, nan=1, posinf=1, neginf=1)
return equalization_scale
| _WeightEqualizationObserver |
python | walkccc__LeetCode | solutions/2261. K Divisible Elements Subarrays/2261.py | {
"start": 0,
"end": 103
} | class ____:
def __init__(self):
self.children: dict[int, TrieNode] = {}
self.count = 0
| TrieNode |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 6498,
"end": 8088
} | class ____(object):
def __init__(self) -> None:
self._queue = ForkQueue()
self._thread = Thread(target=self._worker)
self._thread.daemon = True
self._thread.start()
def is_alive(self) -> bool:
return bool(self._thread) and self._thread.is_alive()
def apply_async(self, func: callable, args: Optional[List] = None) -> bool:
if not func:
return False
self._queue.put((func, args))
return True
def close(self, timeout: float = 5.0) -> None:
t = self._thread
if not t:
return
try:
# push something into queue so it knows this is the end
self._queue.put(None)
# wait fot thread it should not take long, so we have a 5 second timeout
# the background thread itself is doing nothing but push into a queue, so it should not take long
t.join(timeout=timeout)
except BaseException: # noqa
pass
# mark thread is done
self._thread = None
def _worker(self) -> None:
while True:
try:
request = self._queue.get(block=True, timeout=1.0)
if not request:
break
except Empty:
continue
# noinspection PyBroadException
try:
if request[1] is not None:
request[0](*request[1])
else:
request[0]()
except Exception:
pass
self._thread = None
| ThreadCalls |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 15721,
"end": 17205
} | class ____:
def get_filters(self, obj: Dashboard) -> tuple[PageFilters, DashboardFilters]:
from sentry.api.serializers.rest_framework.base import camel_to_snake_case
dashboard_filters = obj.get_filters()
page_filters: PageFilters = {
"projects": dashboard_filters.get("projects", []),
"environment": dashboard_filters.get("environment", []),
"expired": dashboard_filters.get("expired", False),
}
start, end, period = (
dashboard_filters.get("start"),
dashboard_filters.get("end"),
dashboard_filters.get("period"),
)
if start and end:
start_parsed, end_parsed = parse_timestamp(start), parse_timestamp(end)
page_filters["expired"], page_filters["start"] = outside_retention_with_modified_start(
start_parsed, end_parsed, obj.organization
)
page_filters["end"] = end_parsed
elif period:
page_filters["period"] = period
if dashboard_filters.get("utc") is not None:
page_filters["utc"] = dashboard_filters["utc"]
tag_filters: DashboardFilters = {}
for filter_key in ("release", "releaseId", "globalFilter"):
if dashboard_filters.get(camel_to_snake_case(filter_key)):
tag_filters[filter_key] = dashboard_filters[camel_to_snake_case(filter_key)]
return page_filters, tag_filters
| DashboardFiltersMixin |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/definition/dynamic.py | {
"start": 1041,
"end": 9271
} | class ____(
PartitionsDefinition,
NamedTuple(
"_DynamicPartitionsDefinition",
[
(
"partition_fn",
PublicAttr[
Optional[
Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]
]
],
),
("name", PublicAttr[Optional[str]]),
],
),
):
"""A partitions definition whose partition keys can be dynamically added and removed.
This is useful for cases where the set of partitions is not known at definition time,
but is instead determined at runtime.
Partitions can be added and removed using `instance.add_dynamic_partitions` and
`instance.delete_dynamic_partition` methods.
We recommended limiting partition counts for each asset to 100,000 partitions or fewer.
Args:
name (Optional[str]): The name of the partitions definition.
partition_fn (Optional[Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]]):
A function that returns the current set of partitions. This argument is deprecated and
will be removed in 2.0.0.
Examples:
.. code-block:: python
fruits = DynamicPartitionsDefinition(name="fruits")
@sensor(job=my_job)
def my_sensor(context):
return SensorResult(
run_requests=[RunRequest(partition_key="apple")],
dynamic_partitions_requests=[fruits.build_add_request(["apple"])]
)
"""
def __new__(
cls,
partition_fn: Optional[
Callable[[Optional[datetime]], Union[Sequence[Partition], Sequence[str]]]
] = None,
name: Optional[str] = None,
):
partition_fn = check.opt_callable_param(partition_fn, "partition_fn")
name = check.opt_str_param(name, "name")
if partition_fn is None and name is None:
raise DagsterInvalidDefinitionError(
"Must provide either partition_fn or name to DynamicPartitionsDefinition."
)
if partition_fn and name:
raise DagsterInvalidDefinitionError(
"Cannot provide both partition_fn and name to DynamicPartitionsDefinition."
)
return super().__new__(
cls,
partition_fn=check.opt_callable_param(partition_fn, "partition_fn"),
name=check.opt_str_param(name, "name"),
)
def _validated_name(self) -> str:
if self.name is None:
check.failed(
"Dynamic partitions definition must have a name to fetch dynamic partitions"
)
return self.name
def __eq__(self, other):
return (
isinstance(other, DynamicPartitionsDefinition)
and self.name == other.name
and self.partition_fn == other.partition_fn
)
def __hash__(self):
return hash(tuple(self.__repr__()))
def __str__(self) -> str:
if self.name:
return f'Dynamic partitions: "{self._validated_name()}"'
else:
return super().__str__()
def _ensure_dynamic_partitions_store(
self, dynamic_partitions_store: Optional["DynamicPartitionsStore"]
) -> "DynamicPartitionsStore":
if dynamic_partitions_store is None:
check.failed(
"The instance is not available to load partitions. You may be seeing this error"
" when using dynamic partitions with a version of dagster-webserver or"
" dagster-cloud that is older than 1.1.18. The other possibility is that an"
" internal framework error where a dynamic partitions store was not properly"
" threaded down a call stack."
)
return dynamic_partitions_store
@public
def get_partition_keys(
self,
current_time: Optional[datetime] = None,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> Sequence[str]:
"""Returns a list of strings representing the partition keys of the
PartitionsDefinition.
Args:
current_time (Optional[datetime]): A datetime object representing the current time, only
applicable to time-based partitions definitions.
dynamic_partitions_store (Optional[DynamicPartitionsStore]): The DynamicPartitionsStore
object that is responsible for fetching dynamic partitions. Required when the
partitions definition is a DynamicPartitionsDefinition with a name defined. Users
can pass the DagsterInstance fetched via `context.instance` to this argument.
Returns:
Sequence[str]
"""
with partition_loading_context(current_time, dynamic_partitions_store) as ctx:
if self.partition_fn:
partitions = self.partition_fn(current_time)
if all(isinstance(partition, Partition) for partition in partitions):
return [partition.name for partition in partitions] # type: ignore # (illegible conditional)
else:
return partitions # type: ignore # (illegible conditional)
else:
return self._ensure_dynamic_partitions_store(
ctx.dynamic_partitions_store
).get_dynamic_partitions(partitions_def_name=self._validated_name())
def get_serializable_unique_identifier(
self, dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None
) -> str:
with partition_loading_context(dynamic_partitions_store=dynamic_partitions_store) as ctx:
return self._ensure_dynamic_partitions_store(
ctx.dynamic_partitions_store
).get_dynamic_partitions_definition_id(self._validated_name())
def get_paginated_partition_keys(
self,
context: PartitionLoadingContext,
limit: int,
ascending: bool,
cursor: Optional[str] = None,
) -> PaginatedResults[str]:
with partition_loading_context(new_ctx=context):
partition_keys = self.get_partition_keys()
return PaginatedResults.create_from_sequence(
partition_keys, limit=limit, ascending=ascending, cursor=cursor
)
def has_partition_key(
self,
partition_key: str,
current_time: Optional[datetime] = None,
dynamic_partitions_store: Optional["DynamicPartitionsStore"] = None,
) -> bool:
with partition_loading_context(current_time, dynamic_partitions_store) as ctx:
if self.partition_fn:
return partition_key in self.get_partition_keys()
else:
if ctx.dynamic_partitions_store is None:
check.failed(
"The instance is not available to load partitions. You may be seeing this error"
" when using dynamic partitions with a version of dagster-webserver or"
" dagster-cloud that is older than 1.1.18. The other possibility is that an"
" internal framework error where a dynamic partitions store was not properly"
" threaded down a call stack."
)
return ctx.dynamic_partitions_store.has_dynamic_partition(
partitions_def_name=self._validated_name(), partition_key=partition_key
)
def build_add_request(self, partition_keys: Sequence[str]) -> AddDynamicPartitionsRequest:
check.sequence_param(partition_keys, "partition_keys", of_type=str)
validated_name = self._validated_name()
return AddDynamicPartitionsRequest(validated_name, partition_keys)
def build_delete_request(self, partition_keys: Sequence[str]) -> DeleteDynamicPartitionsRequest:
check.sequence_param(partition_keys, "partition_keys", of_type=str)
validated_name = self._validated_name()
return DeleteDynamicPartitionsRequest(validated_name, partition_keys)
| DynamicPartitionsDefinition |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py | {
"start": 11060,
"end": 11804
} | class ____(test.TestCase):
def test_compatible_dimensions_do_not_raise(self):
x = ops.convert_to_tensor(rng.rand(2, 3, 4))
operator = DomainDimensionStubOperator(3)
# Should not raise
self.evaluate(
linear_operator_util.assert_compatible_matrix_dimensions(operator, x))
def test_incompatible_dimensions_raise(self):
x = ops.convert_to_tensor(rng.rand(2, 4, 4))
operator = DomainDimensionStubOperator(3)
# pylint: disable=g-error-prone-assert-raises
with self.assertRaisesOpError("Dimensions are not compatible"):
self.evaluate(
linear_operator_util.assert_compatible_matrix_dimensions(operator, x))
# pylint: enable=g-error-prone-assert-raises
| AssertCompatibleMatrixDimensionsTest |
python | huggingface__transformers | examples/modular-transformers/modeling_new_task_model.py | {
"start": 1986,
"end": 3451
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
| NewTaskModelCausalLMOutputWithPast |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 18283,
"end": 18682
} | class ____(_Multi2VecBase, _VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.MULTI2VEC_PALM, frozen=True, exclude=True
)
videoFields: Optional[List[Multi2VecField]]
projectId: str
location: Optional[str]
modelId: Optional[str]
dimensions: Optional[int]
videoIntervalSeconds: Optional[int]
| _Multi2VecGoogleConfig |
python | doocs__leetcode | solution/2000-2099/2062.Count Vowel Substrings of a String/Solution.py | {
"start": 0,
"end": 341
} | class ____:
def countVowelSubstrings(self, word: str) -> int:
s = set("aeiou")
ans, n = 0, len(word)
for i in range(n):
t = set()
for c in word[i:]:
if c not in s:
break
t.add(c)
ans += len(t) == 5
return ans
| Solution |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 13265,
"end": 13784
} | class ____(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
# %% ../../nbs/05_data.transforms.ipynb 92
| OneHotEncode |
python | fluentpython__example-code | attic/concurrency/wikipedia/orig/sync_py3.py | {
"start": 595,
"end": 3665
} | class ____(ValueError):
"""Raised if unable to parse POTD MediaWiki source"""
def gen_month_dates(year, month):
"""Produce all dates in a given year, month"""
a_date = datetime.date(year, month, 1)
one_day = datetime.timedelta(1)
while a_date.month == month:
yield '{:%Y-%m-%d}'.format(a_date)
a_date += one_day
def fetch_potd_url(iso_date):
"""Fetch POTD thumbnail URL for iso_date ('YYYY-MM-DD' format)"""
if verbose:
print(iso_date)
potd_url = POTD_BASE_URL + iso_date
try:
with urllib.request.urlopen(potd_url) as fp:
html = fp.read().decode('utf-8')
thumb_src = THUMB_SRC_RE.search(html)
if not thumb_src:
msg = 'cannot find thumbnail source for ' + potd_url
raise ParsingException(msg)
thumb_url = THUMB_BASE_URL+thumb_src.group(1)
except urllib.error.HTTPError:
return None
return thumb_url
def gen_img_names(iso_month):
"""Produce picture names by fetching POTD metadata"""
year, month = (int(part) for part in iso_month.split('-'))
for iso_date in gen_month_dates(year, month):
img_url = fetch_potd_url(iso_date)
if img_url is None:
break
yield (iso_date, img_url)
def fetch_image(iso_date, img_url):
"""Fetch and save image data for date and url"""
if verbose:
print('\t' + img_url)
with contextlib.closing(urllib.request.urlopen(img_url)) as fp:
img = fp.read()
img_filename = iso_date + '__' + img_url.split('/')[-1]
if verbose:
print('\t\twriting %0.1f Kbytes' % (len(img)/1024.0))
img_path = os.path.join(LOCAL_IMG_PATH, img_filename)
with io.open(img_path, 'wb') as fp:
fp.write(img)
return len(img)
def get_images(iso_month, max_count=0):
"""Download up to max_count images for a given month"""
if max_count is 0:
max_count = sys.maxsize
img_count = 0
total_size = 0
for iso_date, img_url in gen_img_names(iso_month):
total_size += fetch_image(iso_date, img_url)
img_count += 1
if img_count == max_count:
break
return (img_count, total_size)
def main():
"""Get "Pictures of The Day" from English Wikipedia for a given month"""
global verbose
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument('year_month', help='year and month in YYYY-MM format')
parser.add_argument('-q', '--max_qty', type=int,
help='maximum number of files to download')
parser.add_argument('-v', '--verbose', action='store_true',
help='display progress information')
args = parser.parse_args()
verbose = args.verbose
t0 = time.time()
img_count, total_size = get_images(args.year_month, args.max_qty)
elapsed = time.time() - t0
print("images: %3d | total size: %6.1f Kbytes | elapsed time: %3ds" %
(img_count, total_size/1024.0, elapsed))
if __name__ == '__main__':
main()
| ParsingException |
python | getsentry__sentry | src/sentry/incidents/utils/types.py | {
"start": 258,
"end": 387
} | class ____:
entity: str
subscription_id: str
values: Any
timestamp: datetime
@dataclass
| ProcessedSubscriptionUpdate |
python | tiangolo__fastapi | docs_src/security/tutorial004.py | {
"start": 829,
"end": 897
} | class ____(BaseModel):
access_token: str
token_type: str
| Token |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 10106,
"end": 10187
} | class ____(VyperException):
"""Array index out of range."""
| ArrayIndexException |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/kubernetes_engine.py | {
"start": 6962,
"end": 10207
} | class ____(BaseTrigger):
"""Trigger which checks status of the operation."""
def __init__(
self,
operation_name: str,
project_id: str | None,
location: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
poll_interval: int = 10,
):
super().__init__()
self.operation_name = operation_name
self.project_id = project_id
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.poll_interval = poll_interval
self._hook: GKEAsyncHook | None = None
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize GKEOperationTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.kubernetes_engine.GKEOperationTrigger",
{
"operation_name": self.operation_name,
"project_id": self.project_id,
"location": self.location,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"poll_interval": self.poll_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Get operation status and yields corresponding event."""
hook = self._get_hook()
try:
while True:
operation = await hook.get_operation(
operation_name=self.operation_name,
project_id=self.project_id,
)
status = operation.status
if status == Operation.Status.DONE:
yield TriggerEvent(
{
"status": "success",
"message": "Operation is successfully ended.",
"operation_name": operation.name,
}
)
return
elif status in (Operation.Status.RUNNING, Operation.Status.PENDING):
self.log.info("Operation is still running.")
self.log.info("Sleeping for %ss...", self.poll_interval)
await asyncio.sleep(self.poll_interval)
else:
yield TriggerEvent(
{
"status": "failed",
"message": f"Operation has failed with status: {operation.status}",
}
)
return
except Exception as e:
self.log.exception("Exception occurred while checking operation status")
yield TriggerEvent(
{
"status": "error",
"message": str(e),
}
)
def _get_hook(self) -> GKEAsyncHook:
if self._hook is None:
self._hook = GKEAsyncHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
return self._hook
| GKEOperationTrigger |
python | huggingface__transformers | src/transformers/models/clipseg/configuration_clipseg.py | {
"start": 784,
"end": 5381
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CLIPSeg
[CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the CLIPSeg text model. Defines the number of different tokens that can be represented
by the `inputs_ids` passed when calling [`CLIPSegModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 49406):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 49407):
End of stream token id.
Example:
```python
>>> from transformers import CLIPSegTextConfig, CLIPSegTextModel
>>> # Initializing a CLIPSegTextConfig with CIDAS/clipseg-rd64 style configuration
>>> configuration = CLIPSegTextConfig()
>>> # Initializing a CLIPSegTextModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
>>> model = CLIPSegTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clipseg_text_model"
base_config_key = "text_config"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=77,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
pad_token_id=1,
bos_token_id=49406,
eos_token_id=49407,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
| CLIPSegTextConfig |
python | Textualize__textual | docs/examples/guide/workers/weather03.py | {
"start": 199,
"end": 1307
} | class ____(App):
"""App to display the current weather."""
CSS_PATH = "weather.tcss"
def compose(self) -> ComposeResult:
yield Input(placeholder="Enter a City")
with VerticalScroll(id="weather-container"):
yield Static(id="weather")
async def on_input_changed(self, message: Input.Changed) -> None:
"""Called when the input changes"""
self.update_weather(message.value)
@work(exclusive=True)
async def update_weather(self, city: str) -> None:
"""Update the weather for the given city."""
weather_widget = self.query_one("#weather", Static)
if city:
# Query the network API
url = f"https://wttr.in/{city}"
async with httpx.AsyncClient() as client:
response = await client.get(url)
weather = Text.from_ansi(response.text)
weather_widget.update(weather)
else:
# No city, so just blank out the weather
weather_widget.update("")
if __name__ == "__main__":
app = WeatherApp()
app.run()
| WeatherApp |
python | jmcnamara__XlsxWriter | xlsxwriter/test/core/test_core02.py | {
"start": 365,
"end": 2568
} | class ____(unittest.TestCase):
"""
Test assembling a complete Core file.
"""
def test_assemble_xml_file(self):
"""Test writing an Core file."""
self.maxDiff = None
fh = StringIO()
core = Core()
core._set_filehandle(fh)
properties = {
"title": "This is an example spreadsheet",
"subject": "With document properties",
"author": "John McNamara",
"manager": "Dr. Heinz Doofenshmirtz",
"company": "of Wolves",
"category": "Example spreadsheets",
"keywords": "Sample, Example, Properties",
"comments": "Created with Python and XlsxWriter",
"status": "Quo",
"created": datetime(2011, 4, 6, 19, 45, 15),
}
core._set_properties(properties)
core._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cp:coreProperties xmlns:cp="http://schemas.openxmlformats.org/package/2006/metadata/core-properties" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:dcterms="http://purl.org/dc/terms/" xmlns:dcmitype="http://purl.org/dc/dcmitype/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dc:title>This is an example spreadsheet</dc:title>
<dc:subject>With document properties</dc:subject>
<dc:creator>John McNamara</dc:creator>
<cp:keywords>Sample, Example, Properties</cp:keywords>
<dc:description>Created with Python and XlsxWriter</dc:description>
<cp:lastModifiedBy>John McNamara</cp:lastModifiedBy>
<dcterms:created xsi:type="dcterms:W3CDTF">2011-04-06T19:45:15Z</dcterms:created>
<dcterms:modified xsi:type="dcterms:W3CDTF">2011-04-06T19:45:15Z</dcterms:modified>
<cp:category>Example spreadsheets</cp:category>
<cp:contentStatus>Quo</cp:contentStatus>
</cp:coreProperties>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleCore |
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/base.py | {
"start": 10514,
"end": 16768
} | class ____:
"""
REST handler for fsspec-like filesystem operations, using pathlib.Path.
Supports:
- fs_ls(path_str, detail=True)
- fs_info(path_str)
- fs_exists(path_str)
- fs_isfile(path_str)
- fs_isdir(path_str)
- fs_mkdir(path_str, create_parents=True, exist_ok=False)
- fs_rmdir(path_str)
- fs_rm_file(path_str, missing_ok=False)
- fs_touch(path_str, truncate=True)
"""
def _info_for_path(self, path: Path) -> dict:
"""Get fsspec-like info about a single path."""
out = path.stat(follow_symlinks=False)
link = stat.S_ISLNK(out.st_mode)
if link:
# If it's a link, stat the target
out = path.stat(follow_symlinks=True)
size = out.st_size
if stat.S_ISDIR(out.st_mode):
t = "directory"
elif stat.S_ISREG(out.st_mode):
t = "file"
else:
t = "other"
result = {
"name": str(path),
"size": size,
"type": t,
"created": out.st_ctime,
"islink": link,
}
for field in ["mode", "uid", "gid", "mtime", "ino", "nlink"]:
result[field] = getattr(out, f"st_{field}", None)
if link:
result["destination"] = str(path.resolve())
return result
def _load_path(self, path_str: str) -> Path | None:
"""Convert a path string to a pathlib.Path object."""
return Path(path_str).expanduser()
def fs_ls(self, path_str: str, detail: bool = True):
"""List objects at path, like fsspec.ls()."""
path = self._load_path(path_str)
if not path.exists():
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT),
str(path))
if path.is_file():
# fsspec.ls of a file often returns a single entry
if detail:
yield self._info_for_path(path)
else:
yield str(path)
return
# Otherwise, it's a directory
for p in path.glob("*"):
if detail:
yield self._info_for_path(p)
else:
yield str(p)
def fs_info(self, path_str: str):
"""Get info about a single path, like fsspec.info()."""
path = self._load_path(path_str)
return self._info_for_path(path)
def fs_exists(self, path_str: str) -> bool:
"""Like fsspec.exists()."""
path = self._load_path(path_str)
return path.exists()
def fs_isfile(self, path_str: str) -> bool:
"""Like fsspec.isfile()."""
path = self._load_path(path_str)
return path.is_file()
def fs_isdir(self, path_str: str) -> bool:
"""Like fsspec.isdir()."""
path = self._load_path(path_str)
return path.is_dir()
def fs_mkdir(self, path_str: str, create_parents: bool = True, exist_ok: bool = False):
"""Like fsspec.mkdir()."""
path = self._load_path(path_str)
path.mkdir(parents=create_parents, exist_ok=exist_ok)
return {"success": True}
def fs_rmdir(self, path_str: str, non_empty: bool = False):
"""Like fsspec.rmdir() - remove if empty."""
path = self._load_path(path_str)
if non_empty:
rmtree(path)
else:
path.rmdir()
return {"success": True}
def fs_rm_file(self, path_str: str, missing_ok: bool = False):
"""Like fsspec.rm_file(), remove a single file."""
path = self._load_path(path_str)
path.unlink(missing_ok=missing_ok)
return {"success": True}
def fs_touch(self, path_str: str, truncate: bool = True):
"""
Like fsspec.touch(path, truncate=True).
If truncate=True, zero out file if exists. Otherwise just update mtime.
"""
path = self._load_path(path_str)
if path.exists() and not truncate:
now = datetime.datetime.now().timestamp()
os.utime(path, (now, now))
else:
# create or overwrite
with path.open("wb"):
pass
return {"success": True}
def fs_copy(self, src_str: str, dst_str: str, metadata: bool=False):
"""Like fsspec.copy()."""
src = self._load_path(src_str)
dst = self._load_path(dst_str)
if metadata:
copy2(src, dst)
else:
copy(src, dst)
return {"success": True}
def fs_move(self, src_str: str, dst_str: str):
"""Like fsspec.move()."""
src = self._load_path(src_str)
dst = self._load_path(dst_str)
if not src.exists():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(src))
if dst.exists():
raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST), str(dst))
src.rename(dst)
return {"success": True}
@contextmanager
def fs_zip_dir(
self,
path_str: str,
compression: int = 9,
chunk_size: int = 65536,
):
"""Stream compressed directory content."""
path = self._load_path(path_str)
zip_files = []
for p in path.glob("**/*"):
if p.is_file():
arcname = p.relative_to(path)
zip_files.append(
MemberFile(
name=str(arcname),
modified_at=datetime.datetime.fromtimestamp(p.stat().st_mtime),
data=p.open("rb"),
mode=0x7777 & p.stat().st_mode,
size=p.stat().st_size,
method=CompressionType.ZIP_64,
)
)
try:
if not zip_files:
yield None
else:
yield ZipStream(zip_files,
get_compressobj=lambda: zlib.compressobj(
wbits=-zlib.MAX_WBITS, level=compression,
),
chunk_size=chunk_size,
)
finally:
for f in zip_files:
f.data.close()
| FilesRESTMixin |
python | kennethreitz__tablib | src/tablib/exceptions.py | {
"start": 325,
"end": 401
} | class ____(NotImplementedError):
"Format is not supported"
| UnsupportedFormat |
python | astropy__astropy | astropy/coordinates/tests/test_shape_manipulation.py | {
"start": 2657,
"end": 17753
} | class ____(ShapeSetup):
"""Manipulation of Frame shapes.
Checking that attributes are manipulated correctly.
Even more exhaustive tests are done in time.tests.test_methods
"""
def test_getitem0101(self):
# We on purpose take a slice with only one element, as for the
# general tests it doesn't matter, but it allows us to check
# for a few cases that shapes correctly become scalar if we
# index our size-1 array down to a scalar. See gh-10113.
item = (slice(0, 1), slice(0, 1))
s0_0101 = self.s0[item]
assert s0_0101.shape == (1, 1)
assert_array_equal(s0_0101.data.lon, self.s0.data.lon[item])
assert np.may_share_memory(s0_0101.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_0101.data.lat, self.s0.data.lat)
s0_0101_00 = s0_0101[0, 0]
assert s0_0101_00.shape == ()
assert s0_0101_00.data.lon.shape == ()
assert_array_equal(s0_0101_00.data.lon, self.s0.data.lon[0, 0])
s1_0101 = self.s1[item]
assert s1_0101.shape == (1, 1)
assert_array_equal(s1_0101.data.lon, self.s1.data.lon[item])
assert np.may_share_memory(s1_0101.data.lat, self.s1.data.lat)
assert np.all(s1_0101.obstime == self.s1.obstime[item])
assert np.may_share_memory(s1_0101.obstime.jd1, self.s1.obstime.jd1)
assert_array_equal(s1_0101.location, self.s1.location[0, 0])
assert np.may_share_memory(s1_0101.location, self.s1.location)
assert_array_equal(s1_0101.temperature, self.s1.temperature[item])
assert np.may_share_memory(s1_0101.temperature, self.s1.temperature)
# scalar should just be transferred.
assert s1_0101.pressure is self.s1.pressure
s1_0101_00 = s1_0101[0, 0]
assert s1_0101_00.shape == ()
assert s1_0101_00.obstime.shape == ()
assert s1_0101_00.obstime == self.s1.obstime[0, 0]
s2_0101 = self.s2[item]
assert s2_0101.shape == (1, 1)
assert np.all(s2_0101.data.lon == self.s2.data.lon[item])
assert np.may_share_memory(s2_0101.data.lat, self.s2.data.lat)
assert np.all(s2_0101.obstime == self.s2.obstime[item])
assert np.may_share_memory(s2_0101.obstime.jd1, self.s2.obstime.jd1)
assert_array_equal(s2_0101.obsgeoloc.xyz, self.s2.obsgeoloc[item].xyz)
s3_0101 = self.s3[item]
assert s3_0101.shape == (1, 1)
assert s3_0101.obstime.shape == (1, 1)
assert np.all(s3_0101.obstime == self.s3.obstime[item])
assert np.may_share_memory(s3_0101.obstime.jd1, self.s3.obstime.jd1)
assert_array_equal(s3_0101.obsgeoloc.xyz, self.s3.obsgeoloc[item].xyz)
sc_0101 = self.sc[item]
assert sc_0101.shape == (1, 1)
assert_array_equal(sc_0101.data.lon, self.sc.data.lon[item])
assert np.may_share_memory(sc_0101.data.lat, self.sc.data.lat)
assert np.all(sc_0101.obstime == self.sc.obstime[item])
assert np.may_share_memory(sc_0101.obstime.jd1, self.sc.obstime.jd1)
assert_array_equal(sc_0101.obsgeoloc.xyz, self.sc.obsgeoloc[item].xyz)
def test_ravel(self):
s0_ravel = self.s0.ravel()
assert s0_ravel.shape == (self.s0.size,)
assert np.all(s0_ravel.data.lon == self.s0.data.lon.ravel())
assert np.may_share_memory(s0_ravel.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_ravel.data.lat, self.s0.data.lat)
# Since s1 lon, lat were broadcast, ravel needs to make a copy.
s1_ravel = self.s1.ravel()
assert s1_ravel.shape == (self.s1.size,)
assert np.all(s1_ravel.data.lon == self.s1.data.lon.ravel())
assert not np.may_share_memory(s1_ravel.data.lat, self.s1.data.lat)
assert np.all(s1_ravel.obstime == self.s1.obstime.ravel())
assert not np.may_share_memory(s1_ravel.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_ravel.location == self.s1.location.ravel())
assert not np.may_share_memory(s1_ravel.location, self.s1.location)
assert np.all(s1_ravel.temperature == self.s1.temperature.ravel())
assert np.may_share_memory(s1_ravel.temperature, self.s1.temperature)
assert s1_ravel.pressure == self.s1.pressure
s2_ravel = self.s2.ravel()
assert s2_ravel.shape == (self.s2.size,)
assert np.all(s2_ravel.data.lon == self.s2.data.lon.ravel())
assert not np.may_share_memory(s2_ravel.data.lat, self.s2.data.lat)
assert np.all(s2_ravel.obstime == self.s2.obstime.ravel())
assert not np.may_share_memory(s2_ravel.obstime.jd1, self.s2.obstime.jd1)
# CartesianRepresentation do not allow direct comparisons, as this is
# too tricky to get right in the face of rounding issues. Here, though,
# it cannot be an issue, so we compare the xyz quantities.
assert np.all(s2_ravel.obsgeoloc.xyz == self.s2.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(s2_ravel.obsgeoloc.x, self.s2.obsgeoloc.x)
s3_ravel = self.s3.ravel()
assert s3_ravel.shape == (42,) # cannot use .size on frame w/o data.
assert np.all(s3_ravel.obstime == self.s3.obstime.ravel())
assert not np.may_share_memory(s3_ravel.obstime.jd1, self.s3.obstime.jd1)
assert np.all(s3_ravel.obsgeoloc.xyz == self.s3.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(s3_ravel.obsgeoloc.x, self.s3.obsgeoloc.x)
sc_ravel = self.sc.ravel()
assert sc_ravel.shape == (self.sc.size,)
assert np.all(sc_ravel.data.lon == self.sc.data.lon.ravel())
assert not np.may_share_memory(sc_ravel.data.lat, self.sc.data.lat)
assert np.all(sc_ravel.obstime == self.sc.obstime.ravel())
assert not np.may_share_memory(sc_ravel.obstime.jd1, self.sc.obstime.jd1)
assert np.all(sc_ravel.obsgeoloc.xyz == self.sc.obsgeoloc.ravel().xyz)
assert not np.may_share_memory(sc_ravel.obsgeoloc.x, self.sc.obsgeoloc.x)
def test_flatten(self):
s0_flatten = self.s0.flatten()
assert s0_flatten.shape == (self.s0.size,)
assert np.all(s0_flatten.data.lon == self.s0.data.lon.flatten())
# Flatten always copies.
assert not np.may_share_memory(s0_flatten.data.lat, self.s0.data.lat)
s1_flatten = self.s1.flatten()
assert s1_flatten.shape == (self.s1.size,)
assert np.all(s1_flatten.data.lat == self.s1.data.lat.flatten())
assert not np.may_share_memory(s1_flatten.data.lon, self.s1.data.lat)
assert np.all(s1_flatten.obstime == self.s1.obstime.flatten())
assert not np.may_share_memory(s1_flatten.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_flatten.location == self.s1.location.flatten())
assert not np.may_share_memory(s1_flatten.location, self.s1.location)
assert np.all(s1_flatten.temperature == self.s1.temperature.flatten())
assert not np.may_share_memory(s1_flatten.temperature, self.s1.temperature)
assert s1_flatten.pressure == self.s1.pressure
def test_transpose(self):
s0_transpose = self.s0.transpose()
assert s0_transpose.shape == (7, 6)
assert np.all(s0_transpose.data.lon == self.s0.data.lon.transpose())
assert np.may_share_memory(s0_transpose.data.lat, self.s0.data.lat)
s1_transpose = self.s1.transpose()
assert s1_transpose.shape == (7, 6)
assert np.all(s1_transpose.data.lat == self.s1.data.lat.transpose())
assert np.may_share_memory(s1_transpose.data.lon, self.s1.data.lon)
assert np.all(s1_transpose.obstime == self.s1.obstime.transpose())
assert np.may_share_memory(s1_transpose.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_transpose.location == self.s1.location.transpose())
assert np.may_share_memory(s1_transpose.location, self.s1.location)
assert np.all(s1_transpose.temperature == self.s1.temperature.transpose())
assert np.may_share_memory(s1_transpose.temperature, self.s1.temperature)
assert s1_transpose.pressure == self.s1.pressure
# Only one check on T, since it just calls transpose anyway.
s1_T = self.s1.T
assert s1_T.shape == (7, 6)
assert np.all(s1_T.temperature == self.s1.temperature.T)
assert np.may_share_memory(s1_T.location, self.s1.location)
def test_diagonal(self):
s0_diagonal = self.s0.diagonal()
assert s0_diagonal.shape == (6,)
assert np.all(s0_diagonal.data.lat == self.s0.data.lat.diagonal())
assert np.may_share_memory(s0_diagonal.data.lat, self.s0.data.lat)
def test_swapaxes(self):
s1_swapaxes = self.s1.swapaxes(0, 1)
assert s1_swapaxes.shape == (7, 6)
assert np.all(s1_swapaxes.data.lat == self.s1.data.lat.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.data.lat, self.s1.data.lat)
assert np.all(s1_swapaxes.obstime == self.s1.obstime.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_swapaxes.location == self.s1.location.swapaxes(0, 1))
assert s1_swapaxes.location.shape == (7, 6)
assert np.may_share_memory(s1_swapaxes.location, self.s1.location)
assert np.all(s1_swapaxes.temperature == self.s1.temperature.swapaxes(0, 1))
assert np.may_share_memory(s1_swapaxes.temperature, self.s1.temperature)
assert s1_swapaxes.pressure == self.s1.pressure
def test_reshape(self):
s0_reshape = self.s0.reshape(2, 3, 7)
assert s0_reshape.shape == (2, 3, 7)
assert np.all(s0_reshape.data.lon == self.s0.data.lon.reshape(2, 3, 7))
assert np.all(s0_reshape.data.lat == self.s0.data.lat.reshape(2, 3, 7))
assert np.may_share_memory(s0_reshape.data.lon, self.s0.data.lon)
assert np.may_share_memory(s0_reshape.data.lat, self.s0.data.lat)
s1_reshape = self.s1.reshape(3, 2, 7)
assert s1_reshape.shape == (3, 2, 7)
assert np.all(s1_reshape.data.lat == self.s1.data.lat.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.data.lat, self.s1.data.lat)
assert np.all(s1_reshape.obstime == self.s1.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_reshape.location == self.s1.location.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.location, self.s1.location)
assert np.all(s1_reshape.temperature == self.s1.temperature.reshape(3, 2, 7))
assert np.may_share_memory(s1_reshape.temperature, self.s1.temperature)
assert s1_reshape.pressure == self.s1.pressure
# For reshape(3, 14), copying is necessary for lon, lat, location, time
s1_reshape2 = self.s1.reshape(3, 14)
assert s1_reshape2.shape == (3, 14)
assert np.all(s1_reshape2.data.lon == self.s1.data.lon.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.data.lon, self.s1.data.lon)
assert np.all(s1_reshape2.obstime == self.s1.obstime.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.obstime.jd1, self.s1.obstime.jd1)
assert np.all(s1_reshape2.location == self.s1.location.reshape(3, 14))
assert not np.may_share_memory(s1_reshape2.location, self.s1.location)
assert np.all(s1_reshape2.temperature == self.s1.temperature.reshape(3, 14))
assert np.may_share_memory(s1_reshape2.temperature, self.s1.temperature)
assert s1_reshape2.pressure == self.s1.pressure
s2_reshape = self.s2.reshape(3, 2, 7)
assert s2_reshape.shape == (3, 2, 7)
assert np.all(s2_reshape.data.lon == self.s2.data.lon.reshape(3, 2, 7))
assert np.may_share_memory(s2_reshape.data.lat, self.s2.data.lat)
assert np.all(s2_reshape.obstime == self.s2.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s2_reshape.obstime.jd1, self.s2.obstime.jd1)
assert np.all(
s2_reshape.obsgeoloc.xyz == self.s2.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(s2_reshape.obsgeoloc.x, self.s2.obsgeoloc.x)
s3_reshape = self.s3.reshape(3, 2, 7)
assert s3_reshape.shape == (3, 2, 7)
assert np.all(s3_reshape.obstime == self.s3.obstime.reshape(3, 2, 7))
assert np.may_share_memory(s3_reshape.obstime.jd1, self.s3.obstime.jd1)
assert np.all(
s3_reshape.obsgeoloc.xyz == self.s3.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(s3_reshape.obsgeoloc.x, self.s3.obsgeoloc.x)
sc_reshape = self.sc.reshape(3, 2, 7)
assert sc_reshape.shape == (3, 2, 7)
assert np.all(sc_reshape.data.lon == self.sc.data.lon.reshape(3, 2, 7))
assert np.may_share_memory(sc_reshape.data.lat, self.sc.data.lat)
assert np.all(sc_reshape.obstime == self.sc.obstime.reshape(3, 2, 7))
assert np.may_share_memory(sc_reshape.obstime.jd1, self.sc.obstime.jd1)
assert np.all(
sc_reshape.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 2, 7).xyz
)
assert np.may_share_memory(sc_reshape.obsgeoloc.x, self.sc.obsgeoloc.x)
# For reshape(3, 14), the arrays all need to be copied.
sc_reshape2 = self.sc.reshape(3, 14)
assert sc_reshape2.shape == (3, 14)
assert np.all(sc_reshape2.data.lon == self.sc.data.lon.reshape(3, 14))
assert not np.may_share_memory(sc_reshape2.data.lat, self.sc.data.lat)
assert np.all(sc_reshape2.obstime == self.sc.obstime.reshape(3, 14))
assert not np.may_share_memory(sc_reshape2.obstime.jd1, self.sc.obstime.jd1)
assert np.all(sc_reshape2.obsgeoloc.xyz == self.sc.obsgeoloc.reshape(3, 14).xyz)
assert not np.may_share_memory(sc_reshape2.obsgeoloc.x, self.sc.obsgeoloc.x)
def test_squeeze(self):
s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
assert s0_squeeze.shape == (3, 2, 7)
assert np.all(s0_squeeze.data.lat == self.s0.data.lat.reshape(3, 2, 7))
assert np.may_share_memory(s0_squeeze.data.lat, self.s0.data.lat)
def test_add_dimension(self, method):
if method:
s0_adddim = self.s0[:, np.newaxis, :]
else:
s0_adddim = np.expand_dims(self.s0, 1)
assert s0_adddim.shape == (6, 1, 7)
assert np.all(s0_adddim.data.lon == self.s0.data.lon[:, np.newaxis, :])
assert np.may_share_memory(s0_adddim.data.lat, self.s0.data.lat)
def test_take(self):
s0_take = self.s0.take((5, 2))
assert s0_take.shape == (2,)
assert np.all(s0_take.data.lon == self.s0.data.lon.take((5, 2)))
# Much more detailed tests of shape manipulation via numpy functions done
# in test_representation_methods.
def test_broadcast_to(self):
s1_broadcast = np.broadcast_to(self.s1, (20, 6, 7))
assert s1_broadcast.shape == (20, 6, 7)
assert np.all(s1_broadcast.data.lon == self.s1.data.lon[np.newaxis])
assert np.may_share_memory(s1_broadcast.data.lat, self.s1.data.lat)
| TestManipulation |
python | plotly__plotly.py | plotly/graph_objs/scatterpolar/_hoverlabel.py | {
"start": 233,
"end": 11276
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatterpolar.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 42652,
"end": 44940
} | class ____:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(w, tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(w, tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(w, tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(w, tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(w, tgt, rtol=rtol)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.float64)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res_v.shape)
assert_equal((0, 1), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res, res_v = linalg.eigh(a)
assert_(res_v.dtype.type is np.complex64)
assert_(res.dtype.type is np.float32)
assert_equal(a.shape, res_v.shape)
assert_equal((0,), res.shape)
# This is just for documentation, it might make sense to change:
assert_(isinstance(a, np.ndarray))
| TestEigh |
python | bokeh__bokeh | tests/unit/bokeh/client/test_connection.py | {
"start": 1301,
"end": 2551
} | class ____:
def test_creation(self) -> None:
c = bcc.ClientConnection("session", "wsurl")
assert c.url == "wsurl"
assert c.connected is False
assert isinstance(c.io_loop, IOLoop)
assert c._session == "session"
assert isinstance(c._state, NOT_YET_CONNECTED)
assert c._until_predicate is None
assert c._server_info is None
assert c._arguments is None
def test_creation_with_arguments(self) -> None:
c = bcc.ClientConnection("session", "wsurl", arguments=dict(foo="bar"))
assert c.url == "wsurl"
assert c.connected is False
assert isinstance(c.io_loop, IOLoop)
assert c._session == "session"
assert isinstance(c._state, NOT_YET_CONNECTED)
assert c._until_predicate is None
assert c._server_info is None
assert c._arguments == dict(foo="bar")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_ClientConnection |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v3_checkpoint_test.py | {
"start": 1291,
"end": 4728
} | class ____(parameterized.TestCase, test.TestCase):
def setUp(self):
super().setUp()
self.vocabulary_size = 16384
self.embedding_dim = 128
def test_checkpoint_save_and_restore(self):
feature_config_1 = (
tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self.vocabulary_size,
dim=self.embedding_dim,
initializer=init_ops_v2.Constant(1.0),
optimizer=tpu_embedding_v2_utils.SGD(learning_rate=1),
combiner="sum",
name="video"),
name="watched",
output_shape=[16]))
feature_config_2 = (
tpu_embedding_v2_utils.FeatureConfig(
table=tpu_embedding_v2_utils.TableConfig(
vocabulary_size=self.vocabulary_size,
dim=self.embedding_dim,
initializer=init_ops_v2.Constant(2.0), # different initializer
optimizer=tpu_embedding_v2_utils.SGD(learning_rate=1),
combiner="sum",
name="video"),
name="watched",
output_shape=[16]))
resolver = tpu_cluster_resolver.TPUClusterResolver(tpu="")
tpu_cluster_resolver.initialize_tpu_system(resolver)
strategy = tpu_strategy.TPUStrategy(resolver)
with strategy.scope():
model1 = tpu_embedding_v3.TPUEmbeddingV2(
feature_config=feature_config_1,
optimizer=tpu_embedding_v2_utils.SGD())
model1.build()
# Check saving from inside scope works.
checkpoint = util.Checkpoint(model=model1)
checkpoint.save(self._get_tmpdir("restore", "save"))
# Check the variable created by model1
expected_shard_shape = (self.vocabulary_size //
strategy.num_replicas_in_sync, self.embedding_dim)
self.assertIsInstance(model1._variables["video"]["parameters"],
tpu_embedding_v3.TPUEmbeddingShardedVariable)
self.assertLen(model1._variables["video"]["parameters"].values,
strategy.num_replicas_in_sync)
self.assertEqual(model1._variables["video"]["parameters"].values[0].shape,
expected_shard_shape)
self.assertAllEqual(
model1._variables["video"]["parameters"].values[0].numpy(),
np.ones(expected_shard_shape) * 1.0)
with strategy.scope():
model2 = tpu_embedding_v3.TPUEmbeddingV2(
feature_config=feature_config_2,
optimizer=tpu_embedding_v2_utils.SGD())
def fail_initializer(*args, **kwargs):
del args, kwargs
self.fail("initializer should not be called when restoring")
assert model2._batch_initialize_tables
model2._batch_initialize_tables = fail_initializer
checkpoint = util.Checkpoint(model=model2)
# Load from checkpoint
checkpoint.restore(self._get_tmpdir("restore", "save-1"))
model2.build()
# Check the variable restored by model2
self.assertAllEqual(
model2._variables["video"]["parameters"].values[0].numpy(),
np.ones(expected_shard_shape) * 1.0)
def _get_tmpdir(self, name, subdir=""):
segments = [os.environ.get("TEST_TMPDIR", "/tmp"), name] + (
[subdir] if subdir else []
)
return os.path.join(*segments)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
config.enable_mlir_bridge()
test.main()
| TPUEmbeddingV3CheckpointTest |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/event.py | {
"start": 660,
"end": 875
} | class ____(BaseEvent):
"""Event emitted when processing of a page begins."""
page_id: str
@classmethod
def class_name(cls) -> str:
return "PageDataFetchStartedEvent"
| PageDataFetchStartedEvent |
python | has2k1__plotnine | plotnine/scales/scales.py | {
"start": 643,
"end": 10113
} | class ____(List[scale]):
"""
List of scales
This class has methods the simplify the handling of
the ggplot object scales
"""
def append(self, sc: scale):
"""
Add / Update scale
Removes any previous scales that cover the same aesthetics
"""
ae = sc.aesthetics[0]
cover_ae = self.find(ae)
if any(cover_ae):
warn(_TPL_DUPLICATE_SCALE.format(ae), PlotnineWarning)
idx = cover_ae.index(True)
self.pop(idx)
# super() does not work well with reloads
list.append(self, sc)
def find(self, aesthetic: ScaledAestheticsName | str) -> list[bool]:
"""
Find scales for given aesthetic
Returns a list[bool] each scale if it covers the aesthetic
"""
return [aesthetic in s.aesthetics for s in self]
def input(self):
"""
Return a list of all the aesthetics covered by the scales
"""
lst = [s.aesthetics for s in self]
return list(itertools.chain(*lst))
def get_scales(
self, aesthetic: ScaledAestheticsName | str
) -> scale | None:
"""
Return the scale for the aesthetic or None if there isn't one
These are the scales specified by the user e.g
`ggplot() + scale_x_continuous()`
or those added by default during the plot building
process
"""
bool_lst = self.find(aesthetic)
try:
idx = bool_lst.index(True)
return self[idx]
except ValueError:
return None
@property
def x(self) -> scale | None:
"""
Return x scale
"""
return self.get_scales("x")
@property
def y(self) -> scale | None:
"""
Return y scale
"""
return self.get_scales("y")
def non_position_scales(self) -> Scales:
"""
Return a list of any non-position scales
"""
l = [
s
for s in self
if "x" not in s.aesthetics and "y" not in s.aesthetics
]
return Scales(l)
def position_scales(self) -> Scales:
"""
Return a list of the position scales that are present
"""
l = [s for s in self if ("x" in s.aesthetics) or ("y" in s.aesthetics)]
return Scales(l)
def train(self, data, vars, idx):
"""
Train the scales on the data.
The scales should be for the same aesthetic
e.g. x scales, y scales, color scales, ...
Parameters
----------
data : dataframe
data to use for training
vars : list | tuple
columns in data to use for training.
These should be all the aesthetics of
a scale type that are present in the
data. e.g x, xmin, xmax
idx : array_like
indices that map the data points to the
scales. These start at 1, so subtract 1 to
get the true index into the scales array
"""
idx = np.asarray(idx)
for col in vars:
for i, sc in enumerate(self, start=1):
bool_idx = i == idx
sc.train(data.loc[bool_idx, col])
def map(self, data, vars, idx):
"""
Map the data onto the scales
The scales should be for the same aesthetic
e.g. x scales, y scales, color scales, ...
Parameters
----------
data : dataframe
data with columns to map
This is modified inplace
vars : list | tuple
columns to map
idx : array_like
indices that link the data points to the
scales. These start at 1, so subtract 1 to
get the true index into the scales array
"""
idx = np.asarray(idx)
# discrete scales change the dtype
# from category to int. Use a new dataframe
# to collect these results.
# Using `type` preserves the subclass of pd.DataFrame
discrete_data = type(data)(index=data.index)
# Loop through each variable, mapping across each scale,
# then joining back into the copy of the data
for col in vars:
use_df = array_kind.discrete(data[col])
for i, sc in enumerate(self, start=1):
bool_idx = i == idx
results = sc.map(data.loc[bool_idx, col])
if use_df:
discrete_data.loc[bool_idx, col] = results
else:
data.loc[bool_idx, col] = results
for col in discrete_data:
data[col] = discrete_data[col]
def reset(self):
"""
Reset all the scales
"""
for sc in self:
sc.reset()
def train_df(self, data: pd.DataFrame, drop: bool = False):
"""
Train scales from a dataframe
"""
if (len(data) == 0) or (len(self) == 0):
return
# Each scale trains the columns it understands
for sc in self:
sc.train_df(data)
def map_df(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Map values from a dataframe.
Returns dataframe
"""
if (len(data) == 0) or (len(self) == 0):
return data
# Each scale maps the columns it understands
for sc in self:
data = sc.map_df(data)
return data
def transform_df(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Transform values in a dataframe.
Returns dataframe
"""
if (len(data) == 0) or (len(self) == 0):
return data
# Each scale transforms the columns it understands
for sc in self:
data = sc.transform_df(data)
return data
def inverse_df(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Inveres transform values in a dataframe.
Returns dataframe
"""
if (len(data) == 0) or (len(self) == 0):
return data
# Each scale transforms the columns it understands
for sc in self:
data = sc.inverse_df(data)
return data
def add_defaults(self, data, aesthetics):
"""
Add default scales for the aesthetics if there is none
Scales are added only if the aesthetic is mapped to
a column in the dataframe. This function may have to be
called separately after evaluating the aesthetics.
"""
if not aesthetics:
return
# aesthetics with scales
aws = set()
if self:
for s in (set(sc.aesthetics) for sc in self):
aws.update(s)
# aesthetics that do not have scales present
# We preserve the order of the aesthetics
new_aesthetics = [x for x in aesthetics if x not in aws]
if not new_aesthetics:
return
# If a new aesthetic corresponds to a column in the data
# frame, find a default scale for the type of data in that
# column
seen = set()
for ae in new_aesthetics:
col = aesthetics[ae]
if col not in data:
col = ae
scale_var = aes_to_scale(ae)
if self.get_scales(scale_var):
continue
seen.add(scale_var)
try:
sc = make_scale(scale_var, data[col])
except PlotnineError:
# Skip aesthetics with no scales (e.g. group, order, etc)
continue
self.append(sc)
def add_missing(self, aesthetics):
"""
Add missing but required scales.
Parameters
----------
aesthetics : list | tuple
Aesthetic names. Typically, ('x', 'y').
"""
# Keep only aesthetics that don't have scales
aesthetics = set(aesthetics) - set(self.input())
for ae in aesthetics:
scale_name = f"scale_{ae}_continuous"
scale_f = Registry[scale_name]
self.append(scale_f())
def scale_type(series):
    """
    Return the name of a scale type suited for the series
    """
    # Ordered checks: the first predicate that matches decides
    checks = (
        (array_kind.continuous, "continuous"),
        (array_kind.ordinal, "ordinal"),
        (array_kind.discrete, "discrete"),
        (array_kind.datetime, "datetime"),
        (array_kind.timedelta, "timedelta"),
    )
    for predicate, name in checks:
        if predicate(series):
            return name
    # Unknown kind of data; warn and fall back to a continuous scale
    msg = (
        "Don't know how to automatically pick scale for "
        "object of type {}. Defaulting to 'continuous'"
    )
    warn(msg.format(series.dtype), PlotnineWarning)
    return "continuous"
def make_scale(ae, series, *args, **kwargs):
    """
    Return a proper scale object for the series

    The scale is for the aesthetic ae, and args & kwargs
    are passed on to the scale creating class
    """
    all_infinite = pdtypes.is_float_dtype(series) and np.isinf(series).all()
    if all_infinite:
        raise PlotnineError("Cannot create scale for infinite data")

    stype = scale_type(series)

    # Discrete-ish scale types do not take a transform parameter
    if stype in ("discrete", "ordinal") and "trans" in kwargs:
        del kwargs["trans"]

    scale_klass = Registry[f"scale_{ae}_{stype}"]
    return scale_klass(*args, **kwargs)
| Scales |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_membership_by_name.py | {
"start": 259,
"end": 359
class ____(GQLResult):
    """GraphQL query result exposing the optional matched project."""

    # Matched project, or None when the query found no such project
    # (field type suggests the artifact-membership-by-name query).
    project: Optional[ArtifactMembershipByNameProject]
| ArtifactMembershipByName |
python | jmcnamara__XlsxWriter | xlsxwriter/test/table/test_table01.py | {
"start": 481,
"end": 1978
class ____(unittest.TestCase):
    """
    Test assembling a complete Table file.
    """

    def test_assemble_xml_file(self):
        """Test writing a table"""
        self.maxDiff = None

        # Build a minimal worksheet carrying one table over C3:F13.
        worksheet = Worksheet()
        worksheet.worksheet_meta = WorksheetMeta()
        worksheet.str_table = SharedStringTable()
        worksheet.add_table("C3:F13")
        worksheet._prepare_tables(1, {})

        # Render the table part into an in-memory buffer.
        fh = StringIO()
        table = Table()
        table._set_filehandle(fh)

        table._set_properties(worksheet.tables[0])

        table._assemble_xml_file()

        # Expected XML: default table with an auto-filter, four
        # auto-generated column names and the default table style.
        exp = _xml_to_list(
            """
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
                  <autoFilter ref="C3:F13"/>
                  <tableColumns count="4">
                    <tableColumn id="1" name="Column1"/>
                    <tableColumn id="2" name="Column2"/>
                    <tableColumn id="3" name="Column3"/>
                    <tableColumn id="4" name="Column4"/>
                  </tableColumns>
                  <tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
                </table>
                """
        )

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(exp, got)
| TestAssembleTable |
python | pypa__virtualenv | src/virtualenv/app_data/via_disk_folder.py | {
"start": 3378,
"end": 4771
class ____(ContentStore, ABC):
    """Content store persisting each entry as a JSON file on disk."""

    def __init__(self, in_folder, key, msg_args) -> None:
        # in_folder: folder wrapper exposing ``path`` and per-key locks
        # key: file stem; the payload lives at ``<in_folder.path>/<key>.json``
        # msg_args: context values interpolated into the debug log messages
        self.in_folder = in_folder
        self.key = key
        self.msg_args = (*msg_args, self.file)

    @property
    def file(self):
        """Path of the JSON file backing this entry."""
        return self.in_folder.path / f"{self.key}.json"

    def exists(self):
        """Return True if the backing file is present on disk."""
        return self.file.exists()

    def read(self):
        """Return the parsed JSON content, or None when missing or unreadable.

        A file holding invalid JSON is treated as corrupt and removed
        (best effort); any other read error is deliberately swallowed so
        a broken cache never breaks the caller.
        """
        data, bad_format = None, False
        try:
            data = json.loads(self.file.read_text(encoding="utf-8"))
        except ValueError:
            # File exists but is not valid JSON -> corrupt cache entry
            bad_format = True
        except Exception:  # noqa: BLE001, S110
            pass
        else:
            LOGGER.debug("got %s %s from %s", *self.msg_args)
            return data
        if bad_format:
            with suppress(OSError):  # reading and writing on the same file may cause race on multiple processes
                self.remove()
        return None

    def remove(self):
        """Delete the backing file."""
        self.file.unlink()
        LOGGER.debug("removed %s %s at %s", *self.msg_args)

    @contextmanager
    def locked(self):
        """Hold the folder's lock for this key for the duration of the block."""
        with self.in_folder.lock_for_key(self.key):
            yield

    def write(self, content):
        """Serialize ``content`` as pretty-printed JSON, creating parent dirs."""
        folder = self.file.parent
        folder.mkdir(parents=True, exist_ok=True)
        self.file.write_text(json.dumps(content, sort_keys=True, indent=2), encoding="utf-8")
        LOGGER.debug("wrote %s %s at %s", *self.msg_args)
| JSONStoreDisk |
python | huggingface__transformers | src/transformers/models/ibert/modeling_ibert.py | {
"start": 16463,
"end": 18350
class ____(nn.Module):
    """Quantized output block of an I-BERT-style layer.

    Projects the intermediate activation back to the hidden size,
    applies dropout, adds the residual ``input_tensor``, then runs an
    integer LayerNorm. A scaling factor is threaded alongside every
    activation so the sub-modules can operate on quantized values
    when ``quant_mode`` is enabled.
    """

    def __init__(self, config):
        super().__init__()
        self.quant_mode = config.quant_mode
        # Fixed bit-widths consumed by the quantized sub-modules below
        self.act_bit = 8
        self.weight_bit = 8
        self.bias_bit = 32
        self.ln_input_bit = 22
        self.ln_output_bit = 32
        # Quantized down-projection: intermediate_size -> hidden_size
        self.dense = QuantLinear(
            config.intermediate_size,
            config.hidden_size,
            bias=True,
            weight_bit=self.weight_bit,
            bias_bit=self.bias_bit,
            quant_mode=self.quant_mode,
            per_channel=True,
        )
        # Re-quantizes the (projection + residual) sum before LayerNorm
        self.ln_input_act = QuantAct(self.ln_input_bit, quant_mode=self.quant_mode)
        self.LayerNorm = IntLayerNorm(
            config.hidden_size,
            eps=config.layer_norm_eps,
            output_bit=self.ln_output_bit,
            quant_mode=self.quant_mode,
            force_dequant=config.force_dequant,
        )
        # Final activation quantizer producing 8-bit outputs
        self.output_activation = QuantAct(self.act_bit, quant_mode=self.quant_mode)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, hidden_states_scaling_factor, input_tensor, input_tensor_scaling_factor):
        """Return ``(hidden_states, scaling_factor)`` after projection,
        dropout, residual addition, LayerNorm and output quantization.
        """
        hidden_states, hidden_states_scaling_factor = self.dense(hidden_states, hidden_states_scaling_factor)
        hidden_states = self.dropout(hidden_states)
        # ln_input_act adds the residual (``identity``) while re-quantizing
        hidden_states, hidden_states_scaling_factor = self.ln_input_act(
            hidden_states,
            hidden_states_scaling_factor,
            identity=input_tensor,
            identity_scaling_factor=input_tensor_scaling_factor,
        )
        hidden_states, hidden_states_scaling_factor = self.LayerNorm(hidden_states, hidden_states_scaling_factor)
        hidden_states, hidden_states_scaling_factor = self.output_activation(
            hidden_states, hidden_states_scaling_factor
        )
        return hidden_states, hidden_states_scaling_factor
| IBertOutput |
python | python__mypy | mypyc/codegen/emitmodule.py | {
"start": 18687,
"end": 55994
} | class ____:
def __init__(
self,
modules: dict[str, ModuleIR],
source_paths: dict[str, str],
group_name: str | None,
group_map: dict[str, str | None],
names: NameGenerator,
compiler_options: CompilerOptions,
) -> None:
"""Generator for C source for a compilation group.
The code for a compilation group contains an internal and an
external .h file, and then one .c if not in multi_file mode or
one .c file per module if in multi_file mode.
Arguments:
modules: (name, ir) pairs for each module in the group
source_paths: Map from module names to source file paths
group_name: The name of the group (or None if this is single-module compilation)
group_map: A map of modules to their group names
names: The name generator for the compilation
compiler_options: Mypyc specific options, including multi_file mode
"""
self.modules = modules
self.source_paths = source_paths
self.context = EmitterContext(names, group_name, group_map)
self.names = names
# Initializations of globals to simple values that we can't
# do statically because the windows loader is bad.
self.simple_inits: list[tuple[str, str]] = []
self.group_name = group_name
self.use_shared_lib = group_name is not None
self.compiler_options = compiler_options
self.multi_file = compiler_options.multi_file
# Multi-phase init is needed to enable free-threading. In the future we'll
# probably want to enable it always, but we'll wait until it's stable.
self.multi_phase_init = IS_FREE_THREADED
@property
def group_suffix(self) -> str:
return "_" + exported_name(self.group_name) if self.group_name else ""
@property
def short_group_suffix(self) -> str:
return "_" + exported_name(self.group_name.split(".")[-1]) if self.group_name else ""
def generate_c_for_modules(self) -> list[tuple[str, str]]:
file_contents = []
multi_file = self.use_shared_lib and self.multi_file
# Collect all literal refs in IR.
for module in self.modules.values():
for fn in module.functions:
collect_literals(fn, self.context.literals)
base_emitter = Emitter(self.context)
# Optionally just include the runtime library c files to
# reduce the number of compiler invocations needed
if self.compiler_options.include_runtime_files:
for name in RUNTIME_C_FILES:
base_emitter.emit_line(f'#include "{name}"')
base_emitter.emit_line(f'#include "__native{self.short_group_suffix}.h"')
base_emitter.emit_line(f'#include "__native_internal{self.short_group_suffix}.h"')
emitter = base_emitter
self.generate_literal_tables()
for module_name, module in self.modules.items():
if multi_file:
emitter = Emitter(self.context, filepath=self.source_paths[module_name])
emitter.emit_line(f'#include "__native{self.short_group_suffix}.h"')
emitter.emit_line(f'#include "__native_internal{self.short_group_suffix}.h"')
self.declare_module(module_name, emitter)
self.declare_internal_globals(module_name, emitter)
self.declare_imports(module.imports, emitter)
for cl in module.classes:
if cl.is_ext_class:
generate_class(cl, module_name, emitter)
# Generate Python extension module definitions and module initialization functions.
self.generate_module_def(emitter, module_name, module)
for fn in module.functions:
emitter.emit_line()
generate_native_function(fn, emitter, self.source_paths[module_name], module_name)
if fn.name != TOP_LEVEL_NAME and not fn.internal:
emitter.emit_line()
if is_fastcall_supported(fn, emitter.capi_version):
generate_wrapper_function(
fn, emitter, self.source_paths[module_name], module_name
)
else:
generate_legacy_wrapper_function(
fn, emitter, self.source_paths[module_name], module_name
)
if multi_file:
name = f"__native_{exported_name(module_name)}.c"
file_contents.append((name, "".join(emitter.fragments)))
# The external header file contains type declarations while
# the internal contains declarations of functions and objects
# (which are shared between shared libraries via dynamic
# exports tables and not accessed directly.)
ext_declarations = Emitter(self.context)
ext_declarations.emit_line(f"#ifndef MYPYC_NATIVE{self.group_suffix}_H")
ext_declarations.emit_line(f"#define MYPYC_NATIVE{self.group_suffix}_H")
ext_declarations.emit_line("#include <Python.h>")
ext_declarations.emit_line("#include <CPy.h>")
if self.compiler_options.depends_on_librt_internal:
ext_declarations.emit_line("#include <librt_internal.h>")
if any("librt.base64" in mod.capsules for mod in self.modules.values()):
ext_declarations.emit_line("#include <librt_base64.h>")
if any("librt.strings" in mod.capsules for mod in self.modules.values()):
ext_declarations.emit_line("#include <librt_strings.h>")
declarations = Emitter(self.context)
declarations.emit_line(f"#ifndef MYPYC_LIBRT_INTERNAL{self.group_suffix}_H")
declarations.emit_line(f"#define MYPYC_LIBRT_INTERNAL{self.group_suffix}_H")
declarations.emit_line("#include <Python.h>")
declarations.emit_line("#include <CPy.h>")
declarations.emit_line(f'#include "__native{self.short_group_suffix}.h"')
declarations.emit_line()
declarations.emit_line("int CPyGlobalsInit(void);")
declarations.emit_line()
for module_name, module in self.modules.items():
self.declare_finals(module_name, module.final_names, declarations)
for cl in module.classes:
generate_class_type_decl(cl, emitter, ext_declarations, declarations)
if cl.reuse_freed_instance:
generate_class_reuse(cl, emitter, ext_declarations, declarations)
self.declare_type_vars(module_name, module.type_var_names, declarations)
for fn in module.functions:
generate_function_declaration(fn, declarations)
for lib in sorted(self.context.group_deps):
elib = exported_name(lib)
short_lib = exported_name(lib.split(".")[-1])
declarations.emit_lines(
"#include <{}>".format(os.path.join(group_dir(lib), f"__native_{short_lib}.h")),
f"struct export_table_{elib} exports_{elib};",
)
sorted_decls = self.toposort_declarations()
emitter = base_emitter
self.generate_globals_init(emitter)
emitter.emit_line()
for declaration in sorted_decls:
decls = ext_declarations if declaration.is_type else declarations
if not declaration.is_type:
decls.emit_lines(f"extern {declaration.decl[0]}", *declaration.decl[1:])
# If there is a definition, emit it. Otherwise, repeat the declaration
# (without an extern).
if declaration.defn:
emitter.emit_lines(*declaration.defn)
else:
emitter.emit_lines(*declaration.decl)
else:
decls.emit_lines(*declaration.decl)
if self.group_name:
if self.compiler_options.separate:
self.generate_export_table(ext_declarations, emitter)
self.generate_shared_lib_init(emitter)
ext_declarations.emit_line("#endif")
declarations.emit_line("#endif")
output_dir = group_dir(self.group_name) if self.group_name else ""
return file_contents + [
(
os.path.join(output_dir, f"__native{self.short_group_suffix}.c"),
"".join(emitter.fragments),
),
(
os.path.join(output_dir, f"__native_internal{self.short_group_suffix}.h"),
"".join(declarations.fragments),
),
(
os.path.join(output_dir, f"__native{self.short_group_suffix}.h"),
"".join(ext_declarations.fragments),
),
]
def generate_literal_tables(self) -> None:
"""Generate tables containing descriptions of Python literals to construct.
We will store the constructed literals in a single array that contains
literals of all types. This way we can refer to an arbitrary literal by
its index.
"""
literals = self.context.literals
# During module initialization we store all the constructed objects here
self.declare_global("PyObject *[%d]" % literals.num_literals(), "CPyStatics")
# Descriptions of str literals
init_str = c_string_array_initializer(literals.encoded_str_values())
self.declare_global("const char * const []", "CPyLit_Str", initializer=init_str)
# Descriptions of bytes literals
init_bytes = c_string_array_initializer(literals.encoded_bytes_values())
self.declare_global("const char * const []", "CPyLit_Bytes", initializer=init_bytes)
# Descriptions of int literals
init_int = c_string_array_initializer(literals.encoded_int_values())
self.declare_global("const char * const []", "CPyLit_Int", initializer=init_int)
# Descriptions of float literals
init_floats = c_array_initializer(literals.encoded_float_values())
self.declare_global("const double []", "CPyLit_Float", initializer=init_floats)
# Descriptions of complex literals
init_complex = c_array_initializer(literals.encoded_complex_values())
self.declare_global("const double []", "CPyLit_Complex", initializer=init_complex)
# Descriptions of tuple literals
init_tuple = c_array_initializer(literals.encoded_tuple_values())
self.declare_global("const int []", "CPyLit_Tuple", initializer=init_tuple)
# Descriptions of frozenset literals
init_frozenset = c_array_initializer(literals.encoded_frozenset_values())
self.declare_global("const int []", "CPyLit_FrozenSet", initializer=init_frozenset)
def generate_export_table(self, decl_emitter: Emitter, code_emitter: Emitter) -> None:
"""Generate the declaration and definition of the group's export struct.
To avoid needing to deal with deeply platform specific issues
involving dynamic library linking (and some possibly
insurmountable issues involving cyclic dependencies), compiled
code accesses functions and data in other compilation groups
via an explicit "export struct".
Each group declares a struct type that contains a pointer to
every function and static variable it exports. It then
populates this struct and stores a pointer to it in a capsule
stored as an attribute named 'exports' on the group's shared
library's python module.
On load, a group's init function will import all of its
dependencies' exports tables using the capsule mechanism and
copy the contents into a local copy of the table (to eliminate
the need for a pointer indirection when accessing it).
Then, all calls to functions in another group and accesses to statics
from another group are done indirectly via the export table.
For example, a group containing a module b, where b contains a class B
and a function bar, would declare an export table like:
struct export_table_b {
PyTypeObject **CPyType_B;
PyObject *(*CPyDef_B)(CPyTagged cpy_r_x);
CPyTagged (*CPyDef_B___foo)(PyObject *cpy_r_self, CPyTagged cpy_r_y);
tuple_T2OI (*CPyDef_bar)(PyObject *cpy_r_x);
char (*CPyDef___top_level__)(void);
};
that would be initialized with:
static struct export_table_b exports = {
&CPyType_B,
&CPyDef_B,
&CPyDef_B___foo,
&CPyDef_bar,
&CPyDef___top_level__,
};
To call `b.foo`, then, a function in another group would do
`exports_b.CPyDef_bar(...)`.
"""
decls = decl_emitter.context.declarations
decl_emitter.emit_lines("", f"struct export_table{self.group_suffix} {{")
for name, decl in decls.items():
if decl.needs_export:
decl_emitter.emit_line(pointerize("\n".join(decl.decl), name))
decl_emitter.emit_line("};")
code_emitter.emit_lines("", f"static struct export_table{self.group_suffix} exports = {{")
for name, decl in decls.items():
if decl.needs_export:
code_emitter.emit_line(f"&{name},")
code_emitter.emit_line("};")
def generate_shared_lib_init(self, emitter: Emitter) -> None:
"""Generate the init function for a shared library.
A shared library contains all the actual code for a
compilation group.
The init function is responsible for creating Capsules that
wrap pointers to the initialization function of all the real
init functions for modules in this shared library as well as
the export table containing all the exported functions and
values from all the modules.
These capsules are stored in attributes of the shared library.
"""
assert self.group_name is not None
emitter.emit_line()
short_name = shared_lib_name(self.group_name).split(".")[-1]
emitter.emit_lines(
f"static int exec_{short_name}(PyObject *module)",
"{",
"int res;",
"PyObject *capsule;",
"PyObject *tmp;",
"",
)
if self.compiler_options.separate:
emitter.emit_lines(
'capsule = PyCapsule_New(&exports, "{}.exports", NULL);'.format(
shared_lib_name(self.group_name)
),
"if (!capsule) {",
"goto fail;",
"}",
'res = PyObject_SetAttrString(module, "exports", capsule);',
"Py_DECREF(capsule);",
"if (res < 0) {",
"goto fail;",
"}",
"",
)
for mod in self.modules:
name = exported_name(mod)
if self.multi_phase_init:
capsule_func_prefix = "CPyExec_"
capsule_name_prefix = "exec_"
emitter.emit_line(f"extern int CPyExec_{name}(PyObject *);")
else:
capsule_func_prefix = "CPyInit_"
capsule_name_prefix = "init_"
emitter.emit_line(f"extern PyObject *CPyInit_{name}(void);")
emitter.emit_lines(
'capsule = PyCapsule_New((void *){}{}, "{}.{}{}", NULL);'.format(
capsule_func_prefix,
name,
shared_lib_name(self.group_name),
capsule_name_prefix,
name,
),
"if (!capsule) {",
"goto fail;",
"}",
f'res = PyObject_SetAttrString(module, "{capsule_name_prefix}{name}", capsule);',
"Py_DECREF(capsule);",
"if (res < 0) {",
"goto fail;",
"}",
"",
)
for group in sorted(self.context.group_deps):
egroup = exported_name(group)
emitter.emit_lines(
'tmp = PyImport_ImportModule("{}"); if (!tmp) goto fail; Py_DECREF(tmp);'.format(
shared_lib_name(group)
),
'struct export_table_{} *pexports_{} = PyCapsule_Import("{}.exports", 0);'.format(
egroup, egroup, shared_lib_name(group)
),
f"if (!pexports_{egroup}) {{",
"goto fail;",
"}",
"memcpy(&exports_{group}, pexports_{group}, sizeof(exports_{group}));".format(
group=egroup
),
"",
)
emitter.emit_lines("return 0;", "fail:", "return -1;", "}")
if self.multi_phase_init:
emitter.emit_lines(
f"static PyModuleDef_Slot slots_{short_name}[] = {{",
f"{{Py_mod_exec, exec_{short_name}}},",
"{Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED},",
"{Py_mod_gil, Py_MOD_GIL_NOT_USED},",
"{0, NULL},",
"};",
)
size = 0 if self.multi_phase_init else -1
emitter.emit_lines(
f"static PyModuleDef module_def_{short_name} = {{",
"PyModuleDef_HEAD_INIT,",
f'.m_name = "{shared_lib_name(self.group_name)}",',
".m_doc = NULL,",
f".m_size = {size},",
".m_methods = NULL,",
)
if self.multi_phase_init:
emitter.emit_line(f".m_slots = slots_{short_name},")
emitter.emit_line("};")
if self.multi_phase_init:
emitter.emit_lines(
f"PyMODINIT_FUNC PyInit_{short_name}(void) {{",
f"return PyModuleDef_Init(&module_def_{short_name});",
"}",
)
else:
emitter.emit_lines(
f"PyMODINIT_FUNC PyInit_{short_name}(void) {{",
"static PyObject *module = NULL;",
"if (module) {",
"Py_INCREF(module);",
"return module;",
"}",
f"module = PyModule_Create(&module_def_{short_name});",
"if (!module) {",
"return NULL;",
"}",
f"if (exec_{short_name}(module) < 0) {{",
"Py_DECREF(module);",
"return NULL;",
"}",
"return module;",
"}",
)
def generate_globals_init(self, emitter: Emitter) -> None:
emitter.emit_lines(
"",
"int CPyGlobalsInit(void)",
"{",
"static int is_initialized = 0;",
"if (is_initialized) return 0;",
"",
)
emitter.emit_line("CPy_Init();")
for symbol, fixup in self.simple_inits:
emitter.emit_line(f"{symbol} = {fixup};")
values = "CPyLit_Str, CPyLit_Bytes, CPyLit_Int, CPyLit_Float, CPyLit_Complex, CPyLit_Tuple, CPyLit_FrozenSet"
emitter.emit_lines(
f"if (CPyStatics_Initialize(CPyStatics, {values}) < 0) {{", "return -1;", "}"
)
emitter.emit_lines("is_initialized = 1;", "return 0;", "}")
def generate_module_def(self, emitter: Emitter, module_name: str, module: ModuleIR) -> None:
"""Emit the PyModuleDef struct for a module and the module init function."""
module_prefix = emitter.names.private_name(module_name)
self.emit_module_methods(emitter, module_name, module_prefix, module)
self.emit_module_exec_func(emitter, module_name, module_prefix, module)
# If using multi-phase init and a shared lib, parts of module definition
# will happen in the shim modules, so we skip some steps here.
if not (self.multi_phase_init and self.use_shared_lib):
if self.multi_phase_init:
self.emit_module_def_slots(emitter, module_prefix, module_name)
self.emit_module_def_struct(emitter, module_name, module_prefix)
self.emit_module_init_func(emitter, module_name, module_prefix)
def emit_module_def_slots(
self, emitter: Emitter, module_prefix: str, module_name: str
) -> None:
name = f"{module_prefix}_slots"
exec_name = f"CPyExec_{exported_name(module_name)}"
emitter.emit_line(f"static PyModuleDef_Slot {name}[] = {{")
emitter.emit_line(f"{{Py_mod_exec, {exec_name}}},")
if sys.version_info >= (3, 12):
# Multiple interpreter support requires not using any C global state,
# which we don't support yet.
emitter.emit_line(
"{Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED},"
)
if sys.version_info >= (3, 13):
# Declare support for free-threading to enable experimentation,
# even if we don't properly support it.
emitter.emit_line("{Py_mod_gil, Py_MOD_GIL_NOT_USED},")
emitter.emit_line("{0, NULL},")
emitter.emit_line("};")
def emit_module_methods(
self, emitter: Emitter, module_name: str, module_prefix: str, module: ModuleIR
) -> None:
"""Emit module methods (the static PyMethodDef table)."""
emitter.emit_line(f"static PyMethodDef {module_prefix}module_methods[] = {{")
for fn in module.functions:
if fn.class_name is not None or fn.name == TOP_LEVEL_NAME:
continue
# Coroutines are added to the module dict when the module is initialized.
if fn.decl.is_coroutine:
continue
name = short_id_from_name(fn.name, fn.decl.shortname, fn.line)
if is_fastcall_supported(fn, emitter.capi_version):
flag = "METH_FASTCALL"
else:
flag = "METH_VARARGS"
doc = native_function_doc_initializer(fn)
emitter.emit_line(
(
'{{"{name}", (PyCFunction){prefix}{cname}, {flag} | METH_KEYWORDS, '
"PyDoc_STR({doc}) /* docstring */}},"
).format(
name=name, cname=fn.cname(emitter.names), prefix=PREFIX, flag=flag, doc=doc
)
)
emitter.emit_line("{NULL, NULL, 0, NULL}")
emitter.emit_line("};")
emitter.emit_line()
def emit_module_def_struct(
self, emitter: Emitter, module_name: str, module_prefix: str
) -> None:
"""Emit the static module definition struct (PyModuleDef)."""
emitter.emit_lines(
f"static struct PyModuleDef {module_prefix}module = {{",
"PyModuleDef_HEAD_INIT,",
f'"{module_name}",',
"NULL, /* docstring */",
"0, /* size of per-interpreter state of the module */",
f"{module_prefix}module_methods,",
)
if self.multi_phase_init and not self.use_shared_lib:
slots_name = f"{module_prefix}_slots"
emitter.emit_line(f"{slots_name}, /* m_slots */")
else:
emitter.emit_line("NULL,")
emitter.emit_line("};")
emitter.emit_line()
def emit_coroutine_wrappers(self, emitter: Emitter, module: ModuleIR, globals: str) -> None:
"""Emit insertion of coroutines into the module dict when the module is initialized.
Coroutines are wrapped in CPyFunction objects to enable introspection by functions like
inspect.iscoroutinefunction(fn).
"""
for fn in module.functions:
if fn.class_name is not None or fn.name == TOP_LEVEL_NAME:
continue
if not fn.decl.is_coroutine:
continue
filepath = self.source_paths[module.fullname]
error_stmt = " goto fail;"
name = short_id_from_name(fn.name, fn.decl.shortname, fn.line)
wrapper_name = emitter.emit_cpyfunction_instance(fn, name, filepath, error_stmt)
name_obj = f"{wrapper_name}_name"
emitter.emit_line(f'PyObject *{name_obj} = PyUnicode_FromString("{fn.name}");')
emitter.emit_line(f"if (unlikely(!{name_obj}))")
emitter.emit_line(error_stmt)
emitter.emit_line(
f"if (PyDict_SetItem({globals}, {name_obj}, (PyObject *){wrapper_name}) < 0)"
)
emitter.emit_line(error_stmt)
def emit_module_exec_func(
self, emitter: Emitter, module_name: str, module_prefix: str, module: ModuleIR
) -> None:
"""Emit the module exec function.
If we are compiling just one module, this will be the normal C API
exec function. If we are compiling 2+ modules, we generate a shared
library for the modules and shims that call into the shared
library, and in this case the shared module defines an internal
exec function for each module and these will be called by the shims
via Capsules.
"""
declaration = f"int CPyExec_{exported_name(module_name)}(PyObject *module)"
module_static = self.module_internal_static_name(module_name, emitter)
emitter.emit_lines(declaration, "{")
if self.compiler_options.depends_on_librt_internal:
emitter.emit_line("if (import_librt_internal() < 0) {")
emitter.emit_line("return -1;")
emitter.emit_line("}")
if "librt.base64" in module.capsules:
emitter.emit_line("if (import_librt_base64() < 0) {")
emitter.emit_line("return -1;")
emitter.emit_line("}")
if "librt.strings" in module.capsules:
emitter.emit_line("if (import_librt_strings() < 0) {")
emitter.emit_line("return -1;")
emitter.emit_line("}")
emitter.emit_line("PyObject* modname = NULL;")
if self.multi_phase_init:
emitter.emit_line(f"{module_static} = module;")
emitter.emit_line(
f'modname = PyObject_GetAttrString((PyObject *){module_static}, "__name__");'
)
module_globals = emitter.static_name("globals", module_name)
emitter.emit_lines(
f"{module_globals} = PyModule_GetDict({module_static});",
f"if (unlikely({module_globals} == NULL))",
" goto fail;",
)
if self.multi_phase_init:
emitter.emit_lines(
f"if (PyModule_AddFunctions(module, {module_prefix}module_methods) < 0)",
" goto fail;",
)
self.emit_coroutine_wrappers(emitter, module, module_globals)
# HACK: Manually instantiate generated classes here
type_structs: list[str] = []
for cl in module.classes:
type_struct = emitter.type_struct_name(cl)
type_structs.append(type_struct)
if cl.is_generated:
error_stmt = " goto fail;"
emitter.emit_lines(
"{t} = (PyTypeObject *)CPyType_FromTemplate("
"(PyObject *){t}_template, NULL, modname);".format(t=type_struct)
)
emitter.emit_lines(f"if (unlikely(!{type_struct}))", error_stmt)
name_prefix = cl.name_prefix(emitter.names)
emitter.emit_line(f"CPyDef_{name_prefix}_trait_vtable_setup();")
if cl.coroutine_name:
fn = cl.methods["__call__"]
filepath = self.source_paths[module.fullname]
name = cl.coroutine_name
wrapper_name = emitter.emit_cpyfunction_instance(
fn, name, filepath, error_stmt
)
static_name = emitter.static_name(cl.name + "_cpyfunction", module.fullname)
emitter.emit_line(f"{static_name} = {wrapper_name};")
emitter.emit_lines("if (CPyGlobalsInit() < 0)", " goto fail;")
self.generate_top_level_call(module, emitter)
emitter.emit_lines("Py_DECREF(modname);")
emitter.emit_line("return 0;")
emitter.emit_lines("fail:")
if self.multi_phase_init:
emitter.emit_lines(f"{module_static} = NULL;", "Py_CLEAR(modname);")
else:
emitter.emit_lines(f"Py_CLEAR({module_static});", "Py_CLEAR(modname);")
for name, typ in module.final_names:
static_name = emitter.static_name(name, module_name)
emitter.emit_dec_ref(static_name, typ, is_xdec=True)
undef = emitter.c_undefined_value(typ)
emitter.emit_line(f"{static_name} = {undef};")
# the type objects returned from CPyType_FromTemplate are all new references
# so we have to decref them
for t in type_structs:
emitter.emit_line(f"Py_CLEAR({t});")
emitter.emit_line("return -1;")
emitter.emit_line("}")
def emit_module_init_func(
self, emitter: Emitter, module_name: str, module_prefix: str
) -> None:
if not self.use_shared_lib:
declaration = f"PyMODINIT_FUNC PyInit_{module_name}(void)"
else:
declaration = f"PyObject *CPyInit_{exported_name(module_name)}(void)"
emitter.emit_lines(declaration, "{")
if self.multi_phase_init:
def_name = f"{module_prefix}module"
emitter.emit_line(f"return PyModuleDef_Init(&{def_name});")
emitter.emit_line("}")
return
exec_func = f"CPyExec_{exported_name(module_name)}"
# Store the module reference in a static and return it when necessary.
# This is separate from the *global* reference to the module that will
# be populated when it is imported by a compiled module. We want that
# reference to only be populated when the module has been successfully
# imported, whereas this we want to have to stop a circular import.
module_static = self.module_internal_static_name(module_name, emitter)
emitter.emit_lines(
f"if ({module_static}) {{",
f"Py_INCREF({module_static});",
f"return {module_static};",
"}",
)
emitter.emit_lines(
f"{module_static} = PyModule_Create(&{module_prefix}module);",
f"if (unlikely({module_static} == NULL))",
" goto fail;",
)
emitter.emit_lines(f"if ({exec_func}({module_static}) != 0)", " goto fail;")
emitter.emit_line(f"return {module_static};")
emitter.emit_lines("fail:", "return NULL;")
emitter.emit_lines("}")
def generate_top_level_call(self, module: ModuleIR, emitter: Emitter) -> None:
"""Generate call to function representing module top level."""
# Optimization: we tend to put the top level last, so reverse iterate
for fn in reversed(module.functions):
if fn.name == TOP_LEVEL_NAME:
emitter.emit_lines(
f"char result = {emitter.native_function_name(fn.decl)}();",
"if (result == 2)",
" goto fail;",
)
break
def toposort_declarations(self) -> list[HeaderDeclaration]:
    """Topologically sort the declaration dict by dependencies.

    Declarations can require other declarations to come prior in C (such as
    declaring structs). To guarantee that the C output compiles, the
    declarations must therefore be ordered so that each one follows its
    dependencies. A simple DFS provides such an ordering in O(V + E).
    """
    declarations = self.context.declarations
    ordered: list[HeaderDeclaration] = []
    finished: set[str] = set()

    def dfs(key: str) -> None:
        # Skip declarations that have already been emitted.
        if key in finished:
            return
        for dep in declarations[key].dependencies:
            dfs(dep)
        # Post-order append: all dependencies are already in the list.
        ordered.append(declarations[key])
        finished.add(key)

    for key in declarations:
        dfs(key)
    return ordered
def declare_global(
    self, type_spaced: str, name: str, *, initializer: str | None = None
) -> None:
    """Add a header declaration for a C global (first declaration wins).

    type_spaced must include a trailing space (or an array suffix like
    "int [4]"), so the variable name can be spliced in directly.
    """
    if "[" in type_spaced:
        # Array type: the name goes before the bracketed suffix.
        element_part, array_part = type_spaced.split("[", 1)
        base = f"{element_part}{name}[{array_part}"
    else:
        base = f"{type_spaced}{name}"
    defn = [f"{base} = {initializer};"] if initializer else None
    if name not in self.context.declarations:
        self.context.declarations[name] = HeaderDeclaration(f"{base};", defn=defn)
def declare_internal_globals(self, module_name: str, emitter: Emitter) -> None:
    """Declare the PyObject * static holding the module's globals dict."""
    static_name = emitter.static_name("globals", module_name)
    self.declare_global("PyObject *", static_name)
def module_internal_static_name(self, module_name: str, emitter: Emitter) -> str:
    """Return the name of the static used by module init to cache the module."""
    return emitter.static_name(module_name + "__internal", None, prefix=MODULE_PREFIX)
def declare_module(self, module_name: str, emitter: Emitter) -> None:
    """Declare the global(s) used to refer to a (possibly compiled) module."""
    # We declare two globals for each compiled module:
    # one used internally in the implementation of module init to cache results
    # and prevent infinite recursion in import cycles, and one used
    # by other modules to refer to it.
    if module_name in self.modules:
        internal_static_name = self.module_internal_static_name(module_name, emitter)
        self.declare_global("CPyModule *", internal_static_name, initializer="NULL")
    static_name = emitter.static_name(module_name, None, prefix=MODULE_PREFIX)
    self.declare_global("CPyModule *", static_name)
    # The public reference starts out as Py_None until the module is imported.
    self.simple_inits.append((static_name, "Py_None"))
def declare_imports(self, imps: Iterable[str], emitter: Emitter) -> None:
    """Declare module statics for every module in imps."""
    for imp in imps:
        self.declare_module(imp, emitter)
def declare_finals(
    self, module: str, final_names: Iterable[tuple[str, RType]], emitter: Emitter
) -> None:
    """Declare (and define) the C statics backing a module's final values."""
    for name, typ in final_names:
        static_name = emitter.static_name(name, module)
        # Exported so other compilation units can reference the final value.
        emitter.context.declarations[static_name] = HeaderDeclaration(
            f"{emitter.ctype_spaced(typ)}{static_name};",
            [self.final_definition(module, name, typ, emitter)],
            needs_export=True,
        )
def final_definition(self, module: str, name: str, typ: RType, emitter: Emitter) -> str:
    """Return the C definition (with initializer) for a final value's static."""
    target = emitter.static_name(name, module)
    # We rely on the fact that the undefined value and the error value are
    # always the same, so the undefined value is a safe initializer.
    initial = emitter.c_initializer_undefined_value(typ)
    return f"{emitter.ctype_spaced(typ)}{target} = {initial};"
def declare_static_pyobject(self, identifier: str, emitter: Emitter) -> None:
    """Declare a plain PyObject * static for the given identifier."""
    symbol = emitter.static_name(identifier, None)
    self.declare_global("PyObject *", symbol)
def declare_type_vars(self, module: str, type_var_names: list[str], emitter: Emitter) -> None:
    """Declare NULL-initialized statics holding the module's TypeVar objects."""
    for name in type_var_names:
        static_name = emitter.static_name(name, module, prefix=TYPE_VAR_PREFIX)
        # Not exported: type variable objects are only used within their module.
        emitter.context.declarations[static_name] = HeaderDeclaration(
            f"PyObject *{static_name};",
            [f"PyObject *{static_name} = NULL;"],
            needs_export=False,
        )
T = TypeVar("T")


def toposort(deps: dict[T, set[T]]) -> list[T]:
    """Topologically sort a dict from item to dependencies.

    Each item appears after all of its dependencies. This runs in O(V + E).
    """
    ordered: list[T] = []
    seen: set[T] = set()

    def dfs(node: T) -> None:
        if node in seen:
            return
        for dep in deps[node]:
            dfs(dep)
        # Post-order: all dependencies are already in the result.
        ordered.append(node)
        seen.add(node)

    for node in deps:
        dfs(node)
    return ordered
def is_fastcall_supported(fn: FuncIR, capi_version: tuple[int, int]) -> bool:
    """Return whether fn can use the fastcall calling convention."""
    if fn.class_name is None:
        # Plain functions always support fastcall.
        return True
    if fn.name == "__call__":
        # We can use vectorcalls (PEP 590) when supported
        return True
    # TODO: Support fastcall for __init__ and __new__.
    return fn.name not in ("__init__", "__new__")
def collect_literals(fn: FuncIR, literals: Literals) -> None:
    """Store all Python literal object refs in fn.

    Collecting literals must happen only after we have the final IR.
    This way we won't include literals that have been optimized away.
    """
    every_op = (op for block in fn.blocks for op in block.ops)
    for op in every_op:
        if isinstance(op, LoadLiteral):
            literals.record_literal(op.value)
def c_string_array_initializer(components: list[bytes]) -> str:
    """Render a C array initializer with one C string literal per line."""
    body = "".join(
        " " + c_string_initializer(item) + ",\n" for item in components
    )
    return "{\n" + body + "}"
| GroupGenerator |
python | scipy__scipy | scipy/optimize/tests/test__basinhopping.py | {
"start": 17628,
"end": 19230
} | class ____:
def setup_method(self):
    # Wrap a RandomDisplacement step-taker in AdaptiveStepsize, targeting
    # a 0.5 acceptance rate, so tests can drive the adaptation logic.
    self.stepsize = 1.
    self.ts = RandomDisplacement(stepsize=self.stepsize)
    self.target_accept_rate = 0.5
    self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
                                     accept_rate=self.target_accept_rate)
def test_adaptive_increase(self):
    # if few steps are rejected (acceptance rate above target), the
    # stepsize should increase
    x = 0.
    self.takestep(x)
    self.takestep.report(False)
    for i in range(self.takestep.interval):
        self.takestep(x)
        self.takestep.report(True)
    assert_(self.ts.stepsize > self.stepsize)
def test_adaptive_decrease(self):
    # if many steps are rejected (acceptance rate below target), the
    # stepsize should decrease
    x = 0.
    self.takestep(x)
    self.takestep.report(True)
    for i in range(self.takestep.interval):
        self.takestep(x)
        self.takestep.report(False)
    assert_(self.ts.stepsize < self.stepsize)
def test_all_accepted(self):
    # test that everything works OK if all steps were accepted
    x = 0.
    for i in range(self.takestep.interval + 1):
        self.takestep(x)
        self.takestep.report(True)
    assert_(self.ts.stepsize > self.stepsize)
def test_all_rejected(self):
    # test that everything works OK if all steps were rejected
    x = 0.
    for i in range(self.takestep.interval + 1):
        self.takestep(x)
        self.takestep.report(False)
    assert_(self.ts.stepsize < self.stepsize)
| Test_AdaptiveStepsize |
python | pypa__warehouse | warehouse/oidc/models/google.py | {
"start": 1299,
"end": 3841
} | class ____:
"""
Common functionality for both pending and concrete Google OIDC
providers.
"""
# Primary lookup key: the OIDC token's "email" claim.
email: Mapped[str] = mapped_column(String, nullable=False)
# Optional "sub" claim; an empty value matches any subject (see
# lookup_by_claims and _check_sub).
sub: Mapped[str] = mapped_column(String, nullable=True)
# Claims that must be present and verified on every token.
__required_verifiable_claims__: dict[str, CheckClaimCallable[Any]] = {
    "email": check_claim_binary(str.__eq__),
    "email_verified": check_claim_invariant(True),
}
# Claims verified only when the publisher is configured with them.
__optional_verifiable_claims__: dict[str, CheckClaimCallable[Any]] = {
    "sub": _check_sub
}
# Claims present on tokens but intentionally not verified.
__unchecked_claims__ = {"azp", "google"}
@classmethod
def lookup_by_claims(cls, session: Session, signed_claims: SignedClaims) -> Self:
    """Find the publisher matching the token's claims.

    Candidates are selected by the "email" claim. When the token carries
    a "sub" claim, a publisher configured with that exact sub takes
    precedence; otherwise a publisher with an empty sub (any subject)
    is used. Raises InvalidPublisherError when nothing matches.
    """
    query: Query = Query(cls).filter_by(email=signed_claims["email"])
    publishers = query.with_session(session).all()
    if sub := signed_claims.get("sub"):
        # Prefer a publisher pinned to this exact subject.
        if specific_publisher := first_true(
            publishers, pred=lambda p: p.sub == sub
        ):
            return specific_publisher
    # Fall back to a publisher configured without a subject restriction.
    if general_publisher := first_true(publishers, pred=lambda p: p.sub == ""):
        return general_publisher
    raise InvalidPublisherError("Publisher with matching claims was not found")
@property
def publisher_name(self) -> str:
    """Display name for this publisher type."""
    return "Google"

@property
def publisher_base_url(self) -> None:
    # Google service accounts have no browsable base URL.
    return None

def publisher_url(self, claims: SignedClaims | None = None) -> None:
    # Likewise, no claim-specific URL can be derived.
    return None

@property
def attestation_identity(self) -> Publisher | None:
    """Identity (keyed on email) used for attestation verification."""
    return GoogleIdentity(email=self.email)
def stored_claims(self, claims: SignedClaims | None = None) -> dict:
    # Nothing from the token needs to be persisted for Google publishers.
    return {}

@property
def email_verified(self) -> bool:
    # We don't consider a claim set valid unless `email_verified` is true;
    # no other states are possible.
    return True

def __str__(self) -> str:
    # The configured email is the display representation.
    return self.email
def exists(self, session: Session) -> bool:
    """Return True if a publisher with this exact (email, sub) pair exists."""
    return session.query(
        exists().where(
            and_(
                self.__class__.email == self.email,
                self.__class__.sub == self.sub,
            )
        )
    ).scalar()
@property
def admin_details(self) -> list[tuple[str, str]]:
    """Returns Google publisher configuration details for admin display."""
    details = [
        ("Email", self.email),
    ]
    # Subject is optional; only show it when configured.
    if self.sub:
        details.append(("Subject", self.sub))
    return details
| GooglePublisherMixin |
python | kamyu104__LeetCode-Solutions | Python/integer-replacement.py | {
"start": 509,
"end": 932
} | class ____(object):
def integerReplacement(self, n):
    """
    Minimum operations (halve if even, else +/-1) to reduce n to 1.

    :type n: int
    :rtype: int
    """
    # Base cases: 1 -> 0 ops, 2 -> 1 op, 3 -> 2 ops.
    if n < 4:
        return [0, 0, 1, 2][n]
    # Use floor division (//) so the recursion stays in integers on
    # Python 3 as well; plain / would produce floats and break indexing.
    if n % 4 in (0, 2):
        # Even: halving is always optimal.
        return self.integerReplacement(n // 2) + 1
    elif n % 4 == 1:
        # n-1 is divisible by 4: subtract then halve twice (3 ops).
        return self.integerReplacement((n - 1) // 4) + 3
    else:
        # n % 4 == 3: n+1 is divisible by 4: add then halve twice (3 ops).
        return self.integerReplacement((n + 1) // 4) + 3
| Solution2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.