language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | graphql-python__graphene | examples/starwars/schema.py | {
"start": 83,
"end": 159
} | class ____(graphene.Enum):
NEWHOPE = 4
EMPIRE = 5
JEDI = 6
| Episode |
python | huggingface__transformers | src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py | {
"start": 15358,
"end": 16999
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = RobertaPreLayerNormCrossAttention if is_cross_attention else RobertaPreLayerNormSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = RobertaPreLayerNormSelfOutput(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
hidden_states_pre_layer_norm = self.LayerNorm(hidden_states)
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states_pre_layer_norm,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| RobertaPreLayerNormAttention |
python | ray-project__ray | python/ray/serve/tests/test_config_files/grpc_deployment.py | {
"start": 1779,
"end": 2901
} | class ____:
def __init__(
self,
_orange_stand: DeploymentHandle,
_apple_stand: DeploymentHandle,
):
self.directory = {
"ORANGE": _orange_stand,
"APPLE": _apple_stand,
}
async def FruitStand(self, fruit_amounts_proto):
fruit_amounts = {}
if fruit_amounts_proto.orange:
fruit_amounts["ORANGE"] = fruit_amounts_proto.orange
if fruit_amounts_proto.apple:
fruit_amounts["APPLE"] = fruit_amounts_proto.apple
if fruit_amounts_proto.banana:
fruit_amounts["BANANA"] = fruit_amounts_proto.banana
costs = await self.check_price(fruit_amounts)
return serve_pb2.FruitCosts(costs=costs)
async def check_price(self, inputs: Dict[str, int]) -> float:
costs = 0
for fruit, amount in inputs.items():
if fruit not in self.directory:
return
fruit_stand = self.directory[fruit]
costs += await fruit_stand.remote(int(amount))
return costs
@serve.deployment(ray_actor_options={"num_cpus": 0})
| FruitMarket |
python | apache__airflow | providers/fab/src/airflow/providers/fab/www/api_connexion/exceptions.py | {
"start": 3700,
"end": 4193
} | class ____(ProblemException):
"""Raise when the user is not authenticated."""
def __init__(
self,
title: str = "Unauthorized",
detail: str | None = None,
headers: dict | None = None,
**kwargs: Any,
):
super().__init__(
status=HTTPStatus.UNAUTHORIZED,
type=EXCEPTIONS_LINK_MAP[401],
title=title,
detail=detail,
headers=headers,
**kwargs,
)
| Unauthenticated |
python | kamyu104__LeetCode-Solutions | Python/palindrome-partitioning.py | {
"start": 39,
"end": 933
} | class ____(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
is_palindrome = [[False] * len(s) for i in xrange(len(s))]
for i in reversed(xrange(len(s))):
for j in xrange(i, len(s)):
is_palindrome[i][j] = s[i] == s[j] and ((j - i < 2) or is_palindrome[i + 1][j - 1])
sub_partition = [[] for _ in xrange(len(s))]
for i in reversed(xrange(len(s))):
for j in xrange(i, len(s)):
if is_palindrome[i][j]:
if j + 1 < len(s):
for p in sub_partition[j + 1]:
sub_partition[i].append([s[i:j + 1]] + p)
else:
sub_partition[i].append([s[i:j + 1]])
return sub_partition[0]
# Time: O(2^n)
# Space: O(n)
# recursive solution
| Solution |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 10297,
"end": 12054
} | class ____:
"""
Global optimisation tests with Sobol sampling:
"""
# Sobol algorithm
def test_f1_1_sobol(self):
"""Multivariate test function 1:
x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
run_test(test1_1)
def test_f1_2_sobol(self):
"""Multivariate test function 1:
x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
run_test(test1_2)
def test_f1_3_sobol(self):
"""Multivariate test function 1:
x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
options = {'disp': True}
run_test(test1_3, options=options)
def test_f2_1_sobol(self):
"""Univariate test function on
f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
run_test(test2_1)
def test_f2_2_sobol(self):
"""Univariate test function on
f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
run_test(test2_2)
def test_f3_sobol(self):
"""NLP: Hock and Schittkowski problem 18"""
run_test(test3_1)
@pytest.mark.slow
def test_f4_sobol(self):
"""NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
options = {'infty_constraints': False}
# run_test(test4_1, n=990, options=options)
run_test(test4_1, n=990 * 2, options=options)
def test_f5_1_sobol(self):
"""NLP: Eggholder, multimodal"""
# run_test(test5_1, n=30)
run_test(test5_1, n=60)
def test_f5_2_sobol(self):
"""NLP: Eggholder, multimodal"""
# run_test(test5_1, n=60, iters=5)
run_test(test5_1, n=60, iters=5)
# def test_t911(self):
# """1D tabletop function"""
# run_test(test11_1)
| TestShgoSobolTestFunctions |
python | tiangolo__fastapi | docs_src/cookie_param_models/tutorial001.py | {
"start": 112,
"end": 341
} | class ____(BaseModel):
session_id: str
fatebook_tracker: Union[str, None] = None
googall_tracker: Union[str, None] = None
@app.get("/items/")
async def read_items(cookies: Cookies = Cookie()):
return cookies
| Cookies |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 5966,
"end": 6489
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = torch.nn.ModuleList([])
for _ in range(3):
self.layers.append(
torch.nn.ModuleList(
[
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
]
)
)
def forward(self, x):
for layer, act in self.layers:
x = act(layer(x))
return x
| NestedModuleList |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/repeat_test.py | {
"start": 3568,
"end": 6656
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_repeat_dataset(self,
num_elements,
num_epochs,
num_outputs=None,
options=None):
dataset = dataset_ops.Dataset.range(num_elements).repeat(num_epochs)
if num_outputs:
range_dataset = dataset_ops.Dataset.range(num_outputs)
dataset = dataset_ops.Dataset.zip((dataset, range_dataset))
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def testFiniteRepeat(self, verify_fn, symbolic_checkpoint):
num_elements = 10
num_epochs = 10
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_repeat_dataset(
num_elements, num_epochs, options=options),
num_outputs=(num_elements * num_epochs))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def testEmptyRepeat(self, verify_fn, symbolic_checkpoint):
num_elements = 10
num_epochs = 0
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_repeat_dataset(
num_elements, num_epochs, options=options),
num_outputs=0)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def testInfiniteRepeat(self, verify_fn, symbolic_checkpoint):
num_elements = 10
num_epochs = -1
num_outputs = 100
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_repeat_dataset(
num_elements, num_epochs, num_outputs=num_outputs, options=options),
num_outputs=num_outputs)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def testInfiniteEmptyRepeat(self, verify_fn, symbolic_checkpoint):
num_elements = 0
num_epochs = -1
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_repeat_dataset(
num_elements, num_epochs, options=options),
num_outputs=0)
| RepeatDatasetCheckpointTest |
python | huggingface__transformers | src/transformers/models/ovis2/modeling_ovis2.py | {
"start": 17807,
"end": 20360
} | class ____(Ovis2PreTrainedModel):
config: Ovis2VisionConfig
def __init__(self, config: Ovis2VisionConfig):
super().__init__(config)
self.config = config
self.transformer = Ovis2VisionTransformer(config)
self.num_visual_indicator_tokens = config.num_visual_indicator_tokens
self.vocab_size = config.vocab_size
self.head_linear = nn.Linear(
config.hidden_size * config.hidden_stride * config.hidden_stride,
self.vocab_size - self.num_visual_indicator_tokens,
bias=False,
)
self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens)
def forward(self, pixel_values: torch.FloatTensor, **kwargs) -> tuple[torch.Tensor, torch.Tensor]:
outputs = self.transformer(pixel_values, **kwargs)
last_hidden_state = outputs[0]
if self.config.hidden_stride > 1:
num_images, seq_len, hidden_dim = last_hidden_state.shape
hidden_stride = self.config.hidden_stride
sqrt_l = int(math.sqrt(seq_len))
if sqrt_l * sqrt_l != seq_len:
raise ValueError("Token sequence length must be a perfect square")
pad_size = (hidden_stride - (sqrt_l % hidden_stride)) % hidden_stride
last_hidden_state = nn.functional.pad(last_hidden_state, (0, 0, 0, pad_size, 0, pad_size), "constant", 0)
sqrt_l += pad_size
last_hidden_state = last_hidden_state.reshape(
num_images, sqrt_l // hidden_stride, hidden_stride, sqrt_l // hidden_stride, hidden_stride, hidden_dim
)
last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4, 5)
last_hidden_state = last_hidden_state.reshape(
num_images, -1, hidden_stride * hidden_stride * hidden_dim
) # (n, (sqrt_l//hs)^2, hs^2*d)
logits = self.head_linear(last_hidden_state)
logits = self.head_norm(logits)
if self.config.tokenize_function == "gumbel_argmax":
prob_token = nn.functional.gumbel_softmax(logits, dim=-1, hard=True)
elif self.config.tokenize_function == "st_argmax":
prob_token = hard_softmax(logits, dim=-1)
elif self.config.tokenize_function == "softmax":
prob_token = nn.functional.softmax(logits, dim=-1)
return prob_token
@auto_docstring(
custom_intro="""
The Ovis2 model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
| Ovis2VisionModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F821_27.py | {
"start": 1492,
"end": 1533
} | class ____: ...
# More circular references
| D |
python | doocs__leetcode | solution/2500-2599/2577.Minimum Time to Visit a Cell In a Grid/Solution.py | {
"start": 0,
"end": 807
} | class ____:
def minimumTime(self, grid: List[List[int]]) -> int:
if grid[0][1] > 1 and grid[1][0] > 1:
return -1
m, n = len(grid), len(grid[0])
dist = [[inf] * n for _ in range(m)]
dist[0][0] = 0
q = [(0, 0, 0)]
dirs = (-1, 0, 1, 0, -1)
while 1:
t, i, j = heappop(q)
if i == m - 1 and j == n - 1:
return t
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n:
nt = t + 1
if nt < grid[x][y]:
nt = grid[x][y] + (grid[x][y] - nt) % 2
if nt < dist[x][y]:
dist[x][y] = nt
heappush(q, (nt, x, y))
| Solution |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 95997,
"end": 96313
} | class ____:
xlConsolidation = 3 # from enum XlPivotTableSourceType
xlDatabase = 1 # from enum XlPivotTableSourceType
xlExternal = 2 # from enum XlPivotTableSourceType
xlPivotTable = -4148 # from enum XlPivotTableSourceType
xlScenario = 4 # from enum XlPivotTableSourceType
| PivotTableSourceType |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 212920,
"end": 214560
} | class ____(Operation):
def call(self, condition, x1=None, x2=None):
return backend.numpy.where(condition, x1, x2)
def compute_output_spec(self, condition, x1, x2):
condition_shape = getattr(condition, "shape", [])
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(condition_shape, x1_shape)
output_shape = broadcast_shapes(output_shape, x2_shape)
output_dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1) if x1 is not None else "int"),
getattr(x2, "dtype", type(x2) if x2 is not None else "int"),
)
return KerasTensor(output_shape, dtype=output_dtype)
@keras_export(["keras.ops.where", "keras.ops.numpy.where"])
def where(condition, x1=None, x2=None):
"""Return elements chosen from `x1` or `x2` depending on `condition`.
Args:
condition: Where `True`, yield `x1`, otherwise yield `x2`.
x1: Values from which to choose when `condition` is `True`.
x2: Values from which to choose when `condition` is `False`.
Returns:
A tensor with elements from `x1` where `condition` is `True`, and
elements from `x2` where `condition` is `False`.
"""
if (x1 is None and x2 is not None) or (x1 is not None and x2 is None):
raise ValueError(
"`x1` and `x2` either both should be `None`"
" or both should have non-None value."
)
if any_symbolic_tensors((condition, x1, x2)):
return Where().symbolic_call(condition, x1, x2)
return backend.numpy.where(condition, x1, x2)
| Where |
python | getsentry__sentry | src/sentry/replays/validators.py | {
"start": 581,
"end": 3014
} | class ____(serializers.Serializer):
statsPeriod = serializers.CharField(
help_text="""
This defines the range of the time series, relative to now. The range is given in a
`<number><unit>` format. For example `1d` for a one day range. Possible units are `m` for
minutes, `h` for hours, `d` for days and `w` for weeks. You must either provide a
`statsPeriod`, or a `start` and `end`.
""",
required=False,
)
start = serializers.DateTimeField(
help_text="""
This defines the start of the time series range as an explicit datetime, either in UTC
ISO8601 or epoch seconds. Use along with `end` instead of `statsPeriod`.
""",
required=False,
)
end = serializers.DateTimeField(
help_text="""
This defines the inclusive end of the time series range as an explicit datetime, either in
UTC ISO8601 or epoch seconds. Use along with `start` instead of `statsPeriod`.
""",
required=False,
)
field = serializers.MultipleChoiceField(
choices=VALID_FIELD_SET,
help_text="Specifies a field that should be marshaled in the output. Invalid fields will be rejected.",
required=False,
)
project = serializers.ListField(
required=False,
help_text="The ID of the projects to filter by.",
child=serializers.IntegerField(),
)
projectSlug = serializers.ListField(
required=False,
allow_empty=True,
child=serializers.CharField(),
help_text=("A list of project slugs to filter your results by."),
)
environment = serializers.CharField(help_text="The environment to filter by.", required=False)
sort = serializers.CharField(help_text="The field to sort the output by.", required=False)
sortBy = serializers.CharField(help_text="The field to sort the output by.", required=False)
orderBy = serializers.CharField(help_text="The field to sort the output by.", required=False)
query = serializers.CharField(
help_text="A structured query string to filter the output by.", required=False
)
per_page = serializers.IntegerField(
help_text="Limit the number of rows to return in the result.", required=False
)
cursor = serializers.CharField(
help_text="The cursor parameter is used to paginate results. See [here](https://docs.sentry.io/api/pagination/) for how to use this query parameter",
required=False,
)
| ReplayValidator |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_test_commit/package.py | {
"start": 217,
"end": 1286
} | class ____(Package):
"""Mock package that tests installing specific commit"""
homepage = "http://www.git-fetch-example.com"
# git='to-be-filled-in-by-test'
# ----------------------------
# -- mock_git_repository, or mock_git_version_info
version("main", branch="main")
# ----------------------------
# -- only mock_git_repository
# (session scope)
version("tag", tag="test-tag")
version("annotated-tag", tag="annotated-tag")
# ----------------------------
# -- only mock_git_version_info below
# (function scope)
version("1.0", tag="v1.0")
version("1.1", tag="v1.1")
version("1.2", tag="1.2") # not a typo
version("2.0", tag="v2.0")
def install(self, spec, prefix):
# It is assumed for the test which installs this package, that it will
# be using the earliest commit, which is contained in the range @:0
assert spec.satisfies("@:0")
mkdir(prefix.bin)
# This will only exist for some second commit
install("file.txt", prefix.bin)
| GitTestCommit |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 514,
"end": 962
} | class ____(MPTTModel):
name = models.CharField(max_length=50)
visible = models.BooleanField(default=True)
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
category_uuid = models.CharField(max_length=50, unique=True, null=True)
def __str__(self):
return self.name
def delete(self):
super().delete()
delete.alters_data = True
| Category |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 20035,
"end": 20375
} | class ____:
x: int
y: int = field(init=False)
def test_does_not_include_no_init_fields_in_dataclass_printing():
record = DataClassWithNoInitField(x=1)
assert pretty.pretty(record) == "DataClassWithNoInitField(x=1)"
record.y = 1
assert pretty.pretty(record) == "DataClassWithNoInitField(x=1)"
| DataClassWithNoInitField |
python | pypa__warehouse | tests/unit/admin/views/test_users.py | {
"start": 3775,
"end": 3953
} | class ____:
def test_validate(self):
form = views.EmailForm(formdata=MultiDict({"email": "foo@bar.net"}))
assert form.validate(), str(form.errors)
| TestEmailForm |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 144254,
"end": 144378
} | class ____(BaseModel, extra="forbid"):
update_vectors: "UpdateVectors" = Field(..., description="")
| UpdateVectorsOperation |
python | wandb__wandb | wandb/vendor/pygments/lexers/modula2.py | {
"start": 524,
"end": 52551
} | class ____(RegexLexer):
"""
For `Modula-2 <http://www.modula2.org/>`_ source code.
The Modula-2 lexer supports several dialects. By default, it operates in
fallback mode, recognising the *combined* literals, punctuation symbols
and operators of all supported dialects, and the *combined* reserved words
and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not
differentiating between library defined identifiers.
To select a specific dialect, a dialect option may be passed
or a dialect tag may be embedded into a source file.
Dialect Options:
`m2pim`
Select PIM Modula-2 dialect.
`m2iso`
Select ISO Modula-2 dialect.
`m2r10`
Select Modula-2 R10 dialect.
`objm2`
Select Objective Modula-2 dialect.
The PIM and ISO dialect options may be qualified with a language extension.
Language Extensions:
`+aglet`
Select Aglet Modula-2 extensions, available with m2iso.
`+gm2`
Select GNU Modula-2 extensions, available with m2pim.
`+p1`
Select p1 Modula-2 extensions, available with m2iso.
`+xds`
Select XDS Modula-2 extensions, available with m2iso.
Passing a Dialect Option via Unix Commandline Interface
Dialect options may be passed to the lexer using the `dialect` key.
Only one such option should be passed. If multiple dialect options are
passed, the first valid option is used, any subsequent options are ignored.
Examples:
`$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input`
Use ISO dialect to render input to HTML output
`$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input`
Use ISO dialect with p1 extensions to render input to RTF output
Embedding a Dialect Option within a source file
A dialect option may be embedded in a source file in form of a dialect
tag, a specially formatted comment that specifies a dialect option.
Dialect Tag EBNF::
dialectTag :
OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;
dialectOption :
'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;
Prefix : '!' ;
OpeningCommentDelim : '(*' ;
ClosingCommentDelim : '*)' ;
No whitespace is permitted between the tokens of a dialect tag.
In the event that a source file contains multiple dialect tags, the first
tag that contains a valid dialect option will be used and any subsequent
dialect tags will be ignored. Ideally, a dialect tag should be placed
at the beginning of a source file.
An embedded dialect tag overrides a dialect option set via command line.
Examples:
``(*!m2r10*) DEFINITION MODULE Foobar; ...``
Use Modula2 R10 dialect to render this source file.
``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...``
Use PIM dialect with GNU extensions to render this source file.
Algol Publication Mode:
In Algol publication mode, source text is rendered for publication of
algorithms in scientific papers and academic texts, following the format
of the Revised Algol-60 Language Report. It is activated by passing
one of two corresponding styles as an option:
`algol`
render reserved words lowercase underline boldface
and builtins lowercase boldface italic
`algol_nu`
render reserved words lowercase boldface (no underlining)
and builtins lowercase boldface italic
The lexer automatically performs the required lowercase conversion when
this mode is activated.
Example:
``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input``
Render input file in Algol publication mode to LaTeX output.
Rendering Mode of First Class ADT Identifiers:
The rendering of standard library first class ADT identifiers is controlled
by option flag "treat_stdlib_adts_as_builtins".
When this option is turned on, standard library ADT identifiers are rendered
as builtins. When it is turned off, they are rendered as ordinary library
identifiers.
`treat_stdlib_adts_as_builtins` (default: On)
The option is useful for dialects that support ADTs as first class objects
and provide ADTs in the standard library that would otherwise be built-in.
At present, only Modula-2 R10 supports library ADTs as first class objects
and therefore, no ADT identifiers are defined for any other dialects.
Example:
``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...``
Render standard library ADTs as ordinary library types.
.. versionadded:: 1.3
.. versionchanged:: 2.1
Added multi-dialect support.
"""
name = 'Modula-2'
aliases = ['modula2', 'm2']
filenames = ['*.def', '*.mod']
mimetypes = ['text/x-modula2']
flags = re.MULTILINE | re.DOTALL
tokens = {
'whitespace': [
(r'\n+', Text), # blank lines
(r'\s+', Text), # whitespace
],
'dialecttags': [
# PIM Dialect Tag
(r'\(\*!m2pim\*\)', Comment.Special),
# ISO Dialect Tag
(r'\(\*!m2iso\*\)', Comment.Special),
# M2R10 Dialect Tag
(r'\(\*!m2r10\*\)', Comment.Special),
# ObjM2 Dialect Tag
(r'\(\*!objm2\*\)', Comment.Special),
# Aglet Extensions Dialect Tag
(r'\(\*!m2iso\+aglet\*\)', Comment.Special),
# GNU Extensions Dialect Tag
(r'\(\*!m2pim\+gm2\*\)', Comment.Special),
# p1 Extensions Dialect Tag
(r'\(\*!m2iso\+p1\*\)', Comment.Special),
# XDS Extensions Dialect Tag
(r'\(\*!m2iso\+xds\*\)', Comment.Special),
],
'identifiers': [
(r'([a-zA-Z_$][\w$]*)', Name),
],
'prefixed_number_literals': [
#
# Base-2, whole number
(r'0b[01]+(\'[01]+)*', Number.Bin),
#
# Base-16, whole number
(r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex),
],
'plain_number_literals': [
#
# Base-10, real number with exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*' # fractional part
r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent
Number.Float),
#
# Base-10, real number without exponent
(r'[0-9]+(\'[0-9]+)*' # integral part
r'\.[0-9]+(\'[0-9]+)*', # fractional part
Number.Float),
#
# Base-10, whole number
(r'[0-9]+(\'[0-9]+)*', Number.Integer),
],
'suffixed_number_literals': [
#
# Base-8, whole number
(r'[0-7]+B', Number.Oct),
#
# Base-8, character code
(r'[0-7]+C', Number.Oct),
#
# Base-16, number
(r'[0-9A-F]+H', Number.Hex),
],
'string_literals': [
(r"'(\\\\|\\'|[^'])*'", String), # single quoted string
(r'"(\\\\|\\"|[^"])*"', String), # double quoted string
],
'digraph_operators': [
# Dot Product Operator
(r'\*\.', Operator),
# Array Concatenation Operator
(r'\+>', Operator), # M2R10 + ObjM2
# Inequality Operator
(r'<>', Operator), # ISO + PIM
# Less-Or-Equal, Subset
(r'<=', Operator),
# Greater-Or-Equal, Superset
(r'>=', Operator),
# Identity Operator
(r'==', Operator), # M2R10 + ObjM2
# Type Conversion Operator
(r'::', Operator), # M2R10 + ObjM2
# Assignment Symbol
(r':=', Operator),
# Postfix Increment Mutator
(r'\+\+', Operator), # M2R10 + ObjM2
# Postfix Decrement Mutator
(r'--', Operator), # M2R10 + ObjM2
],
'unigraph_operators': [
# Arithmetic Operators
(r'[+-]', Operator),
(r'[*/]', Operator),
# ISO 80000-2 compliant Set Difference Operator
(r'\\', Operator), # M2R10 + ObjM2
# Relational Operators
(r'[=#<>]', Operator),
# Dereferencing Operator
(r'\^', Operator),
# Dereferencing Operator Synonym
(r'@', Operator), # ISO
# Logical AND Operator Synonym
(r'&', Operator), # PIM + ISO
# Logical NOT Operator Synonym
(r'~', Operator), # PIM + ISO
# Smalltalk Message Prefix
(r'`', Operator), # ObjM2
],
'digraph_punctuation': [
# Range Constructor
(r'\.\.', Punctuation),
# Opening Chevron Bracket
(r'<<', Punctuation), # M2R10 + ISO
# Closing Chevron Bracket
(r'>>', Punctuation), # M2R10 + ISO
# Blueprint Punctuation
(r'->', Punctuation), # M2R10 + ISO
# Distinguish |# and # in M2 R10
(r'\|#', Punctuation),
# Distinguish ## and # in M2 R10
(r'##', Punctuation),
# Distinguish |* and * in M2 R10
(r'\|\*', Punctuation),
],
'unigraph_punctuation': [
# Common Punctuation
(r'[()\[\]{},.:;|]', Punctuation),
# Case Label Separator Synonym
(r'!', Punctuation), # ISO
# Blueprint Punctuation
(r'\?', Punctuation), # M2R10 + ObjM2
],
'comments': [
# Single Line Comment
(r'^//.*?\n', Comment.Single), # M2R10 + ObjM2
# Block Comment
(r'\(\*([^$].*?)\*\)', Comment.Multiline),
# Template Block Comment
(r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2
],
'pragmas': [
# ISO Style Pragmas
(r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2
# Pascal Style Pragmas
(r'\(\*\$.*?\*\)', Comment.Preproc), # PIM
],
'root': [
include('whitespace'),
include('dialecttags'),
include('pragmas'),
include('comments'),
include('identifiers'),
include('suffixed_number_literals'), # PIM + ISO
include('prefixed_number_literals'), # M2R10 + ObjM2
include('plain_number_literals'),
include('string_literals'),
include('digraph_punctuation'),
include('digraph_operators'),
include('unigraph_punctuation'),
include('unigraph_operators'),
]
}
# C o m m o n D a t a s e t s
# Common Reserved Words Dataset
common_reserved_words = (
# 37 common reserved words
'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF',
'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT',
'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN',
'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
)
# Common Builtins Dataset
common_builtins = (
# 16 common builtins
'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER',
'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL',
'TRUE',
)
# Common Pseudo-Module Builtins Dataset
common_pseudo_builtins = (
# 4 common pseudo builtins
'ADDRESS', 'BYTE', 'WORD', 'ADR'
)
# P I M M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for PIM Modula-2
pim_lexemes_to_reject = (
'!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.',
'+>', '->', '<<', '>>', '|#', '##',
)
# PIM Modula-2 Additional Reserved Words Dataset
pim_additional_reserved_words = (
# 3 additional reserved words
'EXPORT', 'QUALIFIED', 'WITH',
)
# PIM Modula-2 Additional Builtins Dataset
pim_additional_builtins = (
# 16 additional builtins
'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH',
'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL',
)
# PIM Modula-2 Additional Pseudo-Module Builtins Dataset
pim_additional_pseudo_builtins = (
# 5 additional pseudo builtins
'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER',
)
# I S O M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for ISO Modula-2
iso_lexemes_to_reject = (
'`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->',
'<<', '>>', '|#', '##',
)
# ISO Modula-2 Additional Reserved Words Dataset
iso_additional_reserved_words = (
# 9 additional reserved words (ISO 10514-1)
'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED',
'REM', 'RETRY', 'WITH',
# 10 additional reserved words (ISO 10514-2 & ISO 10514-3)
'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY',
'REVEAL', 'TRACED', 'UNSAFEGUARDED',
)
# ISO Modula-2 Additional Builtins Dataset
iso_additional_builtins = (
# 26 additional builtins (ISO 10514-1)
'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT',
'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH',
'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE',
'TRUNC', 'UNINTERRUBTIBLE', 'VAL',
# 5 additional builtins (ISO 10514-2 & ISO 10514-3)
'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF',
)
# ISO Modula-2 Additional Pseudo-Module Builtins Dataset
iso_additional_pseudo_builtins = (
# 14 additional builtins (SYSTEM)
'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC',
'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR',
'ROTATE', 'SHIFT', 'CAST', 'TSIZE',
# 13 additional builtins (COROUTINES)
'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER',
'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN',
'NEWCOROUTINE', 'PROT', 'TRANSFER',
# 9 additional builtins (EXCEPTIONS)
'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber',
'ExceptionSource', 'GetMessage', 'IsCurrentSource',
'IsExceptionalExecution', 'RAISE',
# 3 additional builtins (TERMINATION)
'TERMINATION', 'IsTerminating', 'HasHalted',
# 4 additional builtins (M2EXCEPTION)
'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception',
'indexException', 'rangeException', 'caseSelectException',
'invalidLocation', 'functionException', 'wholeValueException',
'wholeDivException', 'realValueException', 'realDivException',
'complexValueException', 'complexDivException', 'protException',
'sysException', 'coException', 'exException',
)
# M o d u l a - 2 R 1 0 D a t a s e t s
# Lexemes to Mark as Error Tokens for Modula-2 R10
m2r10_lexemes_to_reject = (
'!', '`', '@', '$', '%', '&', '<>',
)
# Modula-2 R10 reserved words in addition to the common set
m2r10_additional_reserved_words = (
# 12 additional reserved words
'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE',
'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN',
# 2 additional reserved words with symbolic assembly option
'ASM', 'REG',
)
# Modula-2 R10 builtins in addition to the common set
m2r10_additional_builtins = (
# 26 additional builtins
'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD',
'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT',
'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE',
'UNICHAR', 'WRITE', 'WRITEF',
)
# Modula-2 R10 Additional Pseudo-Module Builtins Dataset
m2r10_additional_pseudo_builtins = (
# 13 additional builtins (TPROPERTIES)
'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL',
'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION',
'TMAXEXP', 'TMINEXP',
# 4 additional builtins (CONVERSION)
'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL',
# 35 additional builtins (UNSAFE)
'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC',
'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC',
'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR',
'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT',
'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC',
# 11 additional builtins (ATOMIC)
'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND',
'BWNAND', 'BWOR', 'BWXOR',
# 7 additional builtins (COMPILER)
'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT',
'HASH',
# 5 additional builtins (ASSEMBLER)
'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE',
)
# O b j e c t i v e M o d u l a - 2 D a t a s e t s
# Lexemes to Mark as Error Tokens for Objective Modula-2
objm2_lexemes_to_reject = (
'!', '$', '%', '&', '<>',
)
# Objective Modula-2 Extensions
# reserved words in addition to Modula-2 R10
objm2_additional_reserved_words = (
# 16 additional reserved words
'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
'SUPER', 'TRY',
)
# Objective Modula-2 Extensions
# builtins in addition to Modula-2 R10
objm2_additional_builtins = (
# 3 additional builtins
'OBJECT', 'NO', 'YES',
)
# Objective Modula-2 Extensions
# pseudo-module builtins in addition to Modula-2 R10
objm2_additional_pseudo_builtins = (
# None
)
# A g l e t M o d u l a - 2 D a t a s e t s
# Aglet Extensions
# reserved words in addition to ISO Modula-2
aglet_additional_reserved_words = (
# None
)
# Aglet Extensions
# builtins in addition to ISO Modula-2
aglet_additional_builtins = (
# 9 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32',
)
# Aglet Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
aglet_additional_pseudo_builtins = (
# None
)
# G N U M o d u l a - 2 D a t a s e t s
# GNU Extensions
# reserved words in addition to PIM Modula-2
gm2_additional_reserved_words = (
# 10 additional reserved words
'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
'__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
)
# GNU Extensions
# builtins in addition to PIM Modula-2
gm2_additional_builtins = (
# 21 additional builtins
'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
)
# GNU Extensions
# pseudo-module builtins in addition to PIM Modula-2
gm2_additional_pseudo_builtins = (
# None
)
# p 1 M o d u l a - 2 D a t a s e t s
# p1 Extensions
# reserved words in addition to ISO Modula-2
p1_additional_reserved_words = (
# None
)
# p1 Extensions
# builtins in addition to ISO Modula-2
p1_additional_builtins = (
# None
)
# p1 Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
p1_additional_pseudo_builtins = (
# 1 additional builtin
'BCD',
)
# X D S M o d u l a - 2 D a t a s e t s
# XDS Extensions
# reserved words in addition to ISO Modula-2
xds_additional_reserved_words = (
# 1 additional reserved word
'SEQ',
)
# XDS Extensions
# builtins in addition to ISO Modula-2
xds_additional_builtins = (
# 9 additional builtins
'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN',
'LONGCARD', 'SHORTCARD', 'SHORTINT',
)
# XDS Modula-2 Extensions
# pseudo-module builtins in addition to ISO Modula-2
xds_additional_pseudo_builtins = (
# 22 additional builtins (SYSTEM)
'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8',
'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE',
'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void'
# 3 additional builtins (COMPILER)
'COMPILER', 'OPTION', 'EQUATION'
)
# P I M S t a n d a r d L i b r a r y D a t a s e t s
# PIM Modula-2 Standard Library Modules Dataset
pim_stdlib_module_identifiers = (
'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage',
)
# PIM Modula-2 Standard Library Types Dataset
pim_stdlib_type_identifiers = (
'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission',
'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand',
'DirectoryCommand',
)
# PIM Modula-2 Standard Library Procedures Dataset
pim_stdlib_proc_identifiers = (
'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn',
'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite',
'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset',
'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar',
'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName',
'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput',
'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd',
'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd',
'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp',
'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE',
)
# PIM Modula-2 Standard Library Variables Dataset
pim_stdlib_var_identifiers = (
'Done', 'termCH', 'in', 'out'
)
# PIM Modula-2 Standard Library Constants Dataset
pim_stdlib_const_identifiers = (
'EOL',
)
# I S O S t a n d a r d L i b r a r y D a t a s e t s
# ISO Modula-2 Standard Library Modules Dataset
iso_stdlib_module_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Types Dataset
iso_stdlib_type_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Procedures Dataset
iso_stdlib_proc_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Variables Dataset
iso_stdlib_var_identifiers = (
# TO DO
)
# ISO Modula-2 Standard Library Constants Dataset
iso_stdlib_const_identifiers = (
# TO DO
)
# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s
# Modula-2 R10 Standard Library ADTs Dataset
m2r10_stdlib_adt_identifiers = (
'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD',
'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT',
'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64',
'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8',
'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8',
'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16',
'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32',
'INT64', 'INT128', 'STRING', 'UNISTRING',
)
# Modula-2 R10 Standard Library Blueprints Dataset
m2r10_stdlib_blueprint_identifiers = (
'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension',
'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath',
)
# Modula-2 R10 Standard Library Modules Dataset
m2r10_stdlib_module_identifiers = (
'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO',
'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO',
'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath',
'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath',
'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport',
)
# Modula-2 R10 Standard Library Types Dataset
m2r10_stdlib_type_identifiers = (
'File', 'Status',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Procedures Dataset
m2r10_stdlib_proc_identifiers = (
'ALLOCATE', 'DEALLOCATE', 'SIZE',
# TO BE COMPLETED
)
# Modula-2 R10 Standard Library Variables Dataset
m2r10_stdlib_var_identifiers = (
'stdIn', 'stdOut', 'stdErr',
)
# Modula-2 R10 Standard Library Constants Dataset
m2r10_stdlib_const_identifiers = (
'pi', 'tau',
)
# D i a l e c t s
# Dialect modes
dialects = (
'unknown',
'm2pim', 'm2iso', 'm2r10', 'objm2',
'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
)
# D a t a b a s e s
# Lexemes to Mark as Errors Database
lexemes_to_reject_db = {
# Lexemes to reject for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Lexemes to reject for PIM Modula-2
'm2pim': (
pim_lexemes_to_reject,
),
# Lexemes to reject for ISO Modula-2
'm2iso': (
iso_lexemes_to_reject,
),
# Lexemes to reject for Modula-2 R10
'm2r10': (
m2r10_lexemes_to_reject,
),
# Lexemes to reject for Objective Modula-2
'objm2': (
objm2_lexemes_to_reject,
),
# Lexemes to reject for Aglet Modula-2
'm2iso+aglet': (
iso_lexemes_to_reject,
),
# Lexemes to reject for GNU Modula-2
'm2pim+gm2': (
pim_lexemes_to_reject,
),
# Lexemes to reject for p1 Modula-2
'm2iso+p1': (
iso_lexemes_to_reject,
),
# Lexemes to reject for XDS Modula-2
'm2iso+xds': (
iso_lexemes_to_reject,
),
}
# Reserved Words Database
reserved_words_db = {
# Reserved words for unknown dialect
'unknown': (
common_reserved_words,
pim_additional_reserved_words,
iso_additional_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for PIM Modula-2
'm2pim': (
common_reserved_words,
pim_additional_reserved_words,
),
# Reserved words for Modula-2 R10
'm2iso': (
common_reserved_words,
iso_additional_reserved_words,
),
# Reserved words for ISO Modula-2
'm2r10': (
common_reserved_words,
m2r10_additional_reserved_words,
),
# Reserved words for Objective Modula-2
'objm2': (
common_reserved_words,
m2r10_additional_reserved_words,
objm2_additional_reserved_words,
),
# Reserved words for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_reserved_words,
iso_additional_reserved_words,
aglet_additional_reserved_words,
),
# Reserved words for GNU Modula-2 Extensions
'm2pim+gm2': (
common_reserved_words,
pim_additional_reserved_words,
gm2_additional_reserved_words,
),
# Reserved words for p1 Modula-2 Extensions
'm2iso+p1': (
common_reserved_words,
iso_additional_reserved_words,
p1_additional_reserved_words,
),
# Reserved words for XDS Modula-2 Extensions
'm2iso+xds': (
common_reserved_words,
iso_additional_reserved_words,
xds_additional_reserved_words,
),
}
# Builtins Database
builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_builtins,
pim_additional_builtins,
iso_additional_builtins,
m2r10_additional_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_builtins,
pim_additional_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_builtins,
iso_additional_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_builtins,
m2r10_additional_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_builtins,
m2r10_additional_builtins,
objm2_additional_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_builtins,
iso_additional_builtins,
aglet_additional_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_builtins,
pim_additional_builtins,
gm2_additional_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_builtins,
iso_additional_builtins,
p1_additional_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_builtins,
iso_additional_builtins,
xds_additional_builtins,
),
}
# Pseudo-Module Builtins Database
pseudo_builtins_db = {
# Builtins for unknown dialect
'unknown': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
iso_additional_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for PIM Modula-2
'm2pim': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2iso': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
),
# Builtins for ISO Modula-2
'm2r10': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
),
# Builtins for Objective Modula-2
'objm2': (
common_pseudo_builtins,
m2r10_additional_pseudo_builtins,
objm2_additional_pseudo_builtins,
),
# Builtins for Aglet Modula-2 Extensions
'm2iso+aglet': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
aglet_additional_pseudo_builtins,
),
# Builtins for GNU Modula-2 Extensions
'm2pim+gm2': (
common_pseudo_builtins,
pim_additional_pseudo_builtins,
gm2_additional_pseudo_builtins,
),
# Builtins for p1 Modula-2 Extensions
'm2iso+p1': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
p1_additional_pseudo_builtins,
),
# Builtins for XDS Modula-2 Extensions
'm2iso+xds': (
common_pseudo_builtins,
iso_additional_pseudo_builtins,
xds_additional_pseudo_builtins,
),
}
# Standard Library ADTs Database
stdlib_adts_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library ADTs for PIM Modula-2
'm2pim': (
# No first class library types
),
# Standard Library ADTs for ISO Modula-2
'm2iso': (
# No first class library types
),
# Standard Library ADTs for Modula-2 R10
'm2r10': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Objective Modula-2
'objm2': (
m2r10_stdlib_adt_identifiers,
),
# Standard Library ADTs for Aglet Modula-2
'm2iso+aglet': (
# No first class library types
),
# Standard Library ADTs for GNU Modula-2
'm2pim+gm2': (
# No first class library types
),
# Standard Library ADTs for p1 Modula-2
'm2iso+p1': (
# No first class library types
),
# Standard Library ADTs for XDS Modula-2
'm2iso+xds': (
# No first class library types
),
}
# Standard Library Modules Database
stdlib_modules_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Modules for PIM Modula-2
'm2pim': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for ISO Modula-2
'm2iso': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for Modula-2 R10
'm2r10': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
m2r10_stdlib_adt_identifiers,
),
# Standard Library Modules for Objective Modula-2
'objm2': (
m2r10_stdlib_blueprint_identifiers,
m2r10_stdlib_module_identifiers,
),
# Standard Library Modules for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_module_identifiers,
),
# Standard Library Modules for p1 Modula-2
'm2iso+p1': (
iso_stdlib_module_identifiers,
),
# Standard Library Modules for XDS Modula-2
'm2iso+xds': (
iso_stdlib_module_identifiers,
),
}
# Standard Library Types Database
stdlib_types_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Types for PIM Modula-2
'm2pim': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for ISO Modula-2
'm2iso': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for Modula-2 R10
'm2r10': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Objective Modula-2
'objm2': (
m2r10_stdlib_type_identifiers,
),
# Standard Library Types for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_type_identifiers,
),
# Standard Library Types for p1 Modula-2
'm2iso+p1': (
iso_stdlib_type_identifiers,
),
# Standard Library Types for XDS Modula-2
'm2iso+xds': (
iso_stdlib_type_identifiers,
),
}
# Standard Library Procedures Database
stdlib_procedures_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Procedures for PIM Modula-2
'm2pim': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for ISO Modula-2
'm2iso': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for Modula-2 R10
'm2r10': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Objective Modula-2
'objm2': (
m2r10_stdlib_proc_identifiers,
),
# Standard Library Procedures for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_proc_identifiers,
),
# Standard Library Procedures for p1 Modula-2
'm2iso+p1': (
iso_stdlib_proc_identifiers,
),
# Standard Library Procedures for XDS Modula-2
'm2iso+xds': (
iso_stdlib_proc_identifiers,
),
}
# Standard Library Variables Database
stdlib_variables_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Variables for PIM Modula-2
'm2pim': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for ISO Modula-2
'm2iso': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for Modula-2 R10
'm2r10': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Objective Modula-2
'objm2': (
m2r10_stdlib_var_identifiers,
),
# Standard Library Variables for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_var_identifiers,
),
# Standard Library Variables for p1 Modula-2
'm2iso+p1': (
iso_stdlib_var_identifiers,
),
# Standard Library Variables for XDS Modula-2
'm2iso+xds': (
iso_stdlib_var_identifiers,
),
}
# Standard Library Constants Database
stdlib_constants_db = {
# Empty entry for unknown dialect
'unknown': (
# LEAVE THIS EMPTY
),
# Standard Library Constants for PIM Modula-2
'm2pim': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for ISO Modula-2
'm2iso': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for Modula-2 R10
'm2r10': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Objective Modula-2
'objm2': (
m2r10_stdlib_const_identifiers,
),
# Standard Library Constants for Aglet Modula-2
'm2iso+aglet': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for GNU Modula-2
'm2pim+gm2': (
pim_stdlib_const_identifiers,
),
# Standard Library Constants for p1 Modula-2
'm2iso+p1': (
iso_stdlib_const_identifiers,
),
# Standard Library Constants for XDS Modula-2
'm2iso+xds': (
iso_stdlib_const_identifiers,
),
}
# M e t h o d s
# initialise a lexer instance
def __init__(self, **options):
#
# check dialect options
#
dialects = get_list_opt(options, 'dialect', [])
#
for dialect_option in dialects:
if dialect_option in self.dialects[1:-1]:
# valid dialect option found
self.set_dialect(dialect_option)
break
#
# Fallback Mode (DEFAULT)
else:
# no valid dialect option
self.set_dialect('unknown')
#
self.dialect_set_by_tag = False
#
# check style options
#
styles = get_list_opt(options, 'style', [])
#
# use lowercase mode for Algol style
if 'algol' in styles or 'algol_nu' in styles:
self.algol_publication_mode = True
else:
self.algol_publication_mode = False
#
# Check option flags
#
self.treat_stdlib_adts_as_builtins = get_bool_opt(
options, 'treat_stdlib_adts_as_builtins', True)
#
# call superclass initialiser
RegexLexer.__init__(self, **options)
# Set lexer to a specified dialect
def set_dialect(self, dialect_id):
#
# if __debug__:
# print 'entered set_dialect with arg: ', dialect_id
#
# check dialect name against known dialects
if dialect_id not in self.dialects:
dialect = 'unknown' # default
else:
dialect = dialect_id
#
# compose lexemes to reject set
lexemes_to_reject_set = set()
# add each list of reject lexemes for this dialect
for list in self.lexemes_to_reject_db[dialect]:
lexemes_to_reject_set.update(set(list))
#
# compose reserved words set
reswords_set = set()
# add each list of reserved words for this dialect
for list in self.reserved_words_db[dialect]:
reswords_set.update(set(list))
#
# compose builtins set
builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.builtins_db[dialect]:
builtins_set.update(set(list).difference(reswords_set))
#
# compose pseudo-builtins set
pseudo_builtins_set = set()
# add each list of builtins for this dialect excluding reserved words
for list in self.pseudo_builtins_db[dialect]:
pseudo_builtins_set.update(set(list).difference(reswords_set))
#
# compose ADTs set
adts_set = set()
# add each list of ADTs for this dialect excluding reserved words
for list in self.stdlib_adts_db[dialect]:
adts_set.update(set(list).difference(reswords_set))
#
# compose modules set
modules_set = set()
# add each list of builtins for this dialect excluding builtins
for list in self.stdlib_modules_db[dialect]:
modules_set.update(set(list).difference(builtins_set))
#
# compose types set
types_set = set()
# add each list of types for this dialect excluding builtins
for list in self.stdlib_types_db[dialect]:
types_set.update(set(list).difference(builtins_set))
#
# compose procedures set
procedures_set = set()
# add each list of procedures for this dialect excluding builtins
for list in self.stdlib_procedures_db[dialect]:
procedures_set.update(set(list).difference(builtins_set))
#
# compose variables set
variables_set = set()
# add each list of variables for this dialect excluding builtins
for list in self.stdlib_variables_db[dialect]:
variables_set.update(set(list).difference(builtins_set))
#
# compose constants set
constants_set = set()
# add each list of constants for this dialect excluding builtins
for list in self.stdlib_constants_db[dialect]:
constants_set.update(set(list).difference(builtins_set))
#
# update lexer state
self.dialect = dialect
self.lexemes_to_reject = lexemes_to_reject_set
self.reserved_words = reswords_set
self.builtins = builtins_set
self.pseudo_builtins = pseudo_builtins_set
self.adts = adts_set
self.modules = modules_set
self.types = types_set
self.procedures = procedures_set
self.variables = variables_set
self.constants = constants_set
#
# if __debug__:
# print 'exiting set_dialect'
# print ' self.dialect: ', self.dialect
# print ' self.lexemes_to_reject: ', self.lexemes_to_reject
# print ' self.reserved_words: ', self.reserved_words
# print ' self.builtins: ', self.builtins
# print ' self.pseudo_builtins: ', self.pseudo_builtins
# print ' self.adts: ', self.adts
# print ' self.modules: ', self.modules
# print ' self.types: ', self.types
# print ' self.procedures: ', self.procedures
# print ' self.variables: ', self.variables
# print ' self.types: ', self.types
# print ' self.constants: ', self.constants
# Extracts a dialect name from a dialect tag comment string and checks
# the extracted name against known dialects. If a match is found, the
# matching name is returned, otherwise dialect id 'unknown' is returned
def get_dialect_from_dialect_tag(self, dialect_tag):
#
# if __debug__:
# print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag
#
# constants
left_tag_delim = '(*!'
right_tag_delim = '*)'
left_tag_delim_len = len(left_tag_delim)
right_tag_delim_len = len(right_tag_delim)
indicator_start = left_tag_delim_len
indicator_end = -(right_tag_delim_len)
#
# check comment string for dialect indicator
if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \
and dialect_tag.startswith(left_tag_delim) \
and dialect_tag.endswith(right_tag_delim):
#
# if __debug__:
# print 'dialect tag found'
#
# extract dialect indicator
indicator = dialect_tag[indicator_start:indicator_end]
#
# if __debug__:
# print 'extracted: ', indicator
#
# check against known dialects
for index in range(1, len(self.dialects)):
#
# if __debug__:
# print 'dialects[', index, ']: ', self.dialects[index]
#
if indicator == self.dialects[index]:
#
# if __debug__:
# print 'matching dialect found'
#
# indicator matches known dialect
return indicator
else:
# indicator does not match any dialect
return 'unknown' # default
else:
# invalid indicator string
return 'unknown' # default
    # intercept the token stream, modify token attributes and return them
    def get_tokens_unprocessed(self, text):
        """Post-process the standard token stream for the active dialect.

        Re-classifies tokens produced by ``RegexLexer``: honours an
        embedded dialect tag (``Comment.Special``), promotes plain
        ``Name`` tokens to reserved-word/builtin/stdlib categories,
        marks literal and comment forms the active dialect forbids as
        ``Error``, and substitutes publication glyphs in Algol mode.
        """
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            #
            # check for dialect tag if dialect has not been set by tag
            if not self.dialect_set_by_tag and token == Comment.Special:
                indicated_dialect = self.get_dialect_from_dialect_tag(value)
                if indicated_dialect != 'unknown':
                    # token is a dialect indicator
                    # reset reserved words and builtins
                    self.set_dialect(indicated_dialect)
                    self.dialect_set_by_tag = True
            #
            # check for reserved words, predefined and stdlib identifiers
            # (ordering implements precedence: reserved > builtin >
            # pseudo-builtin > ADT > module > type > procedure > variable
            # > constant)
            if token is Name:
                if value in self.reserved_words:
                    token = Keyword.Reserved
                    if self.algol_publication_mode:
                        value = value.lower()
                #
                elif value in self.builtins:
                    token = Name.Builtin
                    if self.algol_publication_mode:
                        value = value.lower()
                #
                elif value in self.pseudo_builtins:
                    token = Name.Builtin.Pseudo
                    if self.algol_publication_mode:
                        value = value.lower()
                #
                elif value in self.adts:
                    if not self.treat_stdlib_adts_as_builtins:
                        token = Name.Namespace
                    else:
                        token = Name.Builtin.Pseudo
                        if self.algol_publication_mode:
                            value = value.lower()
                #
                elif value in self.modules:
                    token = Name.Namespace
                #
                elif value in self.types:
                    token = Name.Class
                #
                elif value in self.procedures:
                    token = Name.Function
                #
                elif value in self.variables:
                    token = Name.Variable
                #
                elif value in self.constants:
                    token = Name.Constant
            #
            elif token in Number:
                #
                # mark prefix number literals as error for PIM and ISO dialects
                if self.dialect not in ('unknown', 'm2r10', 'objm2'):
                    if "'" in value or value[0:2] in ('0b', '0x', '0u'):
                        token = Error
                #
                elif self.dialect in ('m2r10', 'objm2'):
                    # mark base-8 number literals as errors for M2 R10 and ObjM2
                    if token is Number.Oct:
                        token = Error
                    # mark suffix base-16 literals as errors for M2 R10 and ObjM2
                    elif token is Number.Hex and 'H' in value:
                        token = Error
                    # mark real numbers with E as errors for M2 R10 and ObjM2
                    elif token is Number.Float and 'E' in value:
                        token = Error
            #
            elif token in Comment:
                #
                # mark single line comment as error for PIM and ISO dialects
                if token is Comment.Single:
                    if self.dialect not in ('unknown', 'm2r10', 'objm2'):
                        token = Error
                #
                if token is Comment.Preproc:
                    # mark ISO pragma as error for PIM dialects
                    if value.startswith('<*') and \
                            self.dialect.startswith('m2pim'):
                        token = Error
                    # mark PIM pragma as comment for other dialects
                    elif value.startswith('(*$') and \
                            self.dialect != 'unknown' and \
                            not self.dialect.startswith('m2pim'):
                        token = Comment.Multiline
            #
            else:  # token is neither Name nor Comment
                #
                # mark lexemes matching the dialect's error token set as errors
                if value in self.lexemes_to_reject:
                    token = Error
                #
                # substitute lexemes when in Algol mode
                if self.algol_publication_mode:
                    if value == '#':
                        value = u'≠'
                    elif value == '<=':
                        value = u'≤'
                    elif value == '>=':
                        value = u'≥'
                    elif value == '==':
                        value = u'≡'
                    elif value == '*.':
                        value = u'•'
            # return result
            yield index, token, value
| Modula2Lexer |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 13148,
"end": 13415
} | class ____(str, Enum):
"""
Enum for DAG warning types.
This is the set of allowable values for the ``warning_type`` field
in the DagWarning model.
"""
ASSET_CONFLICT = "asset conflict"
NON_EXISTENT_POOL = "non-existent pool"
| DagWarningType |
python | joerick__pyinstrument | examples/falcon_hello.py | {
"start": 633,
"end": 854
} | class ____:
def on_get(self, req, resp):
time.sleep(1)
resp.media = "hello"
app = falcon.App()
if PROFILING:
app.add_middleware(ProfilerMiddleware())
app.add_route("/", HelloResource())
| HelloResource |
python | getsentry__sentry | src/sentry/uptime/grouptype.py | {
"start": 3379,
"end": 8148
} | class ____(StatefulDetectorHandler[UptimePacketValue, CheckStatus]):
@override
@property
def thresholds(self) -> DetectorThresholds:
recovery_threshold = self.detector.config["recovery_threshold"]
downtime_threshold = self.detector.config["downtime_threshold"]
return {
DetectorPriorityLevel.OK: recovery_threshold,
DetectorPriorityLevel.HIGH: downtime_threshold,
}
@override
def extract_value(self, data_packet: DataPacket[UptimePacketValue]) -> CheckStatus:
return data_packet.packet.check_result["status"]
@override
def build_issue_fingerprint(self, group_key: DetectorGroupKey = None) -> list[str]:
# TODO(epurkhiser): We should migrate the fingerprints over to match
# what the default fingerprint is.
return build_fingerprint(self.detector)
@override
def extract_dedupe_value(self, data_packet: DataPacket[UptimePacketValue]) -> int:
return int(data_packet.packet.check_result["scheduled_check_time_ms"])
@override
def evaluate(
self, data_packet: DataPacket[UptimePacketValue]
) -> dict[DetectorGroupKey, DetectorEvaluationResult]:
result = super().evaluate(data_packet)
if not result:
return result
# Uptime does not use stateful detector value grouping
evaluation = result[None]
uptime_subscription = data_packet.packet.subscription
metric_tags = data_packet.packet.metric_tags
issue_creation_enabled = options.get("uptime.create-issues")
restricted_host_provider_ids = options.get(
"uptime.restrict-issue-creation-by-hosting-provider-id"
)
host_provider_id = uptime_subscription.host_provider_id
host_provider_enabled = host_provider_id not in restricted_host_provider_ids
issue_creation_allowed = issue_creation_enabled and host_provider_enabled
if not host_provider_enabled:
metrics.incr(
"uptime.result_processor.restricted_by_provider",
sample_rate=1.0,
tags={
"host_provider_id": host_provider_id,
**metric_tags,
},
)
result_creates_issue = isinstance(evaluation.result, IssueOccurrence)
result_resolves_issue = isinstance(evaluation.result, StatusChangeMessage)
if result_creates_issue:
metrics.incr(
"uptime.detector.will_create_issue",
tags=metric_tags,
sample_rate=1.0,
)
# XXX(epurkhiser): This logging includes the same extra arguments
# as the `uptime_active_sent_occurrence` log in the consumer for
# legacy creation
logger.info(
"uptime.detector.will_create_issue",
extra={
"project_id": self.detector.project_id,
"url": uptime_subscription.url,
**data_packet.packet.check_result,
},
)
if result_resolves_issue:
metrics.incr(
"uptime.detector.will_resolve_issue",
sample_rate=1.0,
tags=metric_tags,
)
logger.info(
"uptime.detector.will_resolve_issue",
extra={
"project_id": self.detector.project_id,
"url": uptime_subscription.url,
**data_packet.packet.check_result,
},
)
# Reutning an empty dict effectively causes the detector processor to
# bail and not produce an issue occurrence.
if result_creates_issue and not issue_creation_allowed:
return {}
return result
@override
def create_occurrence(
self,
evaluation_result: ProcessedDataConditionGroup,
data_packet: DataPacket[UptimePacketValue],
priority: DetectorPriorityLevel,
) -> tuple[DetectorOccurrence, EventData]:
result = data_packet.packet.check_result
uptime_subscription = data_packet.packet.subscription
occurrence = DetectorOccurrence(
issue_title=f"Downtime detected for {uptime_subscription.url}",
subtitle="Your monitored domain is down",
evidence_display=build_evidence_display(result),
type=UptimeDomainCheckFailure,
level="error",
culprit="", # TODO: The url?
assignee=self.detector.owner,
priority=priority,
)
event_data = build_event_data(result, self.detector)
return (occurrence, event_data)
@dataclass(frozen=True)
| UptimeDetectorHandler |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 1565,
"end": 2497
} | class ____(BaseForm):
checkboxes = forms.MultipleChoiceField(
choices=((1, "Option one"), (2, "Option two"), (3, "Option three")),
initial=(1,),
widget=forms.CheckboxSelectMultiple,
)
alphacheckboxes = forms.MultipleChoiceField(
choices=(("option_one", "Option one"), ("option_two", "Option two"), ("option_three", "Option three")),
initial=("option_two", "option_three"),
widget=forms.CheckboxSelectMultiple,
)
numeric_multiple_checkboxes = forms.MultipleChoiceField(
choices=((1, "Option one"), (2, "Option two"), (3, "Option three")),
initial=(1, 2),
widget=forms.CheckboxSelectMultiple,
)
inline_radios = forms.ChoiceField(
choices=(
("option_one", "Option one"),
("option_two", "Option two"),
),
widget=forms.RadioSelect,
initial="option_two",
)
| CheckboxesSampleForm |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_dc_transforms.py | {
"start": 72506,
"end": 77278
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
"""tests for #8718"""
__dialect__ = "default"
@testing.fixture
def model(self):
def go(use_mixin, use_inherits, mad_setup, dataclass_kw):
if use_mixin:
if mad_setup == "dc, mad":
class BaseEntity(
DeclarativeBase, MappedAsDataclass, **dataclass_kw
):
pass
elif mad_setup == "mad, dc":
class BaseEntity(
MappedAsDataclass, DeclarativeBase, **dataclass_kw
):
pass
elif mad_setup == "subclass":
class BaseEntity(DeclarativeBase):
pass
class IdMixin(MappedAsDataclass):
id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
if mad_setup == "subclass":
class A(
IdMixin, MappedAsDataclass, BaseEntity, **dataclass_kw
):
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "a",
}
__tablename__ = "a"
type: Mapped[str] = mapped_column(String, init=False)
data: Mapped[str] = mapped_column(String, init=False)
else:
class A(IdMixin, BaseEntity):
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "a",
}
__tablename__ = "a"
type: Mapped[str] = mapped_column(String, init=False)
data: Mapped[str] = mapped_column(String, init=False)
else:
if mad_setup == "dc, mad":
class BaseEntity(
DeclarativeBase, MappedAsDataclass, **dataclass_kw
):
id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
elif mad_setup == "mad, dc":
class BaseEntity(
MappedAsDataclass, DeclarativeBase, **dataclass_kw
):
id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
elif mad_setup == "subclass":
class BaseEntity(MappedAsDataclass, DeclarativeBase):
id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
if mad_setup == "subclass":
class A(BaseEntity, **dataclass_kw):
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "a",
}
__tablename__ = "a"
type: Mapped[str] = mapped_column(String, init=False)
data: Mapped[str] = mapped_column(String, init=False)
else:
class A(BaseEntity):
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "a",
}
__tablename__ = "a"
type: Mapped[str] = mapped_column(String, init=False)
data: Mapped[str] = mapped_column(String, init=False)
if use_inherits:
class B(A):
__mapper_args__ = {
"polymorphic_identity": "b",
}
b_data: Mapped[str] = mapped_column(String, init=False)
return B
else:
return A
yield go
@testing.combinations("inherits", "plain", argnames="use_inherits")
@testing.combinations("mixin", "base", argnames="use_mixin")
@testing.combinations(
"mad, dc", "dc, mad", "subclass", argnames="mad_setup"
)
def test_mapping(self, model, use_inherits, use_mixin, mad_setup):
target_cls = model(
use_inherits=use_inherits == "inherits",
use_mixin=use_mixin == "mixin",
mad_setup=mad_setup,
dataclass_kw={},
)
obj = target_cls()
assert "id" not in obj.__dict__
| MixinColumnTest |
python | plotly__plotly.py | plotly/graph_objs/layout/slider/_currentvalue.py | {
"start": 235,
"end": 6059
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.slider"
_path_str = "layout.slider.currentvalue"
_valid_props = {"font", "offset", "prefix", "suffix", "visible", "xanchor"}
@property
def font(self):
"""
Sets the font of the current value label text.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.slider.currentvalue.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.slider.currentvalue.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def offset(self):
"""
The amount of space, in pixels, between the current value label
and the slider.
The 'offset' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["offset"]
@offset.setter
def offset(self, val):
self["offset"] = val
@property
def prefix(self):
"""
When currentvalue.visible is true, this sets the prefix of the
label.
The 'prefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["prefix"]
@prefix.setter
def prefix(self, val):
self["prefix"] = val
@property
def suffix(self):
"""
When currentvalue.visible is true, this sets the suffix of the
label.
The 'suffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["suffix"]
@suffix.setter
def suffix(self, val):
self["suffix"] = val
@property
def visible(self):
"""
Shows the currently-selected value above the slider.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def xanchor(self):
"""
The alignment of the value readout relative to the length of
the slider.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets the font of the current value label text.
offset
The amount of space, in pixels, between the current
value label and the slider.
prefix
When currentvalue.visible is true, this sets the prefix
of the label.
suffix
When currentvalue.visible is true, this sets the suffix
of the label.
visible
Shows the currently-selected value above the slider.
xanchor
The alignment of the value readout relative to the
length of the slider.
"""
def __init__(
self,
arg=None,
font=None,
offset=None,
prefix=None,
suffix=None,
visible=None,
xanchor=None,
**kwargs,
):
"""
Construct a new Currentvalue object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.slider.Currentvalue`
font
Sets the font of the current value label text.
offset
The amount of space, in pixels, between the current
value label and the slider.
prefix
When currentvalue.visible is true, this sets the prefix
of the label.
suffix
When currentvalue.visible is true, this sets the suffix
of the label.
visible
Shows the currently-selected value above the slider.
xanchor
The alignment of the value readout relative to the
length of the slider.
Returns
-------
Currentvalue
"""
super().__init__("currentvalue")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.slider.Currentvalue
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Currentvalue`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("offset", arg, offset)
self._set_property("prefix", arg, prefix)
self._set_property("suffix", arg, suffix)
self._set_property("visible", arg, visible)
self._set_property("xanchor", arg, xanchor)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Currentvalue |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/resnet.py | {
"start": 2341,
"end": 4562
} | class ____(nn.Module):
"""ResNetV1."""
stage_sizes: Sequence[int]
block_cls: ModuleDef
num_classes: int
num_filters: int = 64
dtype: Any = jnp.float32
act: Callable = nn.relu
conv: ModuleDef = nn.Conv
@nn.compact
def __call__(self, x, train: bool = True):
conv = partial(self.conv, use_bias=False, dtype=self.dtype)
norm = partial(nn.BatchNorm,
use_running_average=not train,
momentum=0.9,
epsilon=1e-5,
dtype=self.dtype)
x = conv(self.num_filters, (7, 7), (2, 2),
padding=[(3, 3), (3, 3)],
name='conv_init')(x)
x = norm(name='bn_init')(x)
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')
for i, block_size in enumerate(self.stage_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = self.block_cls(self.num_filters * 2 ** i,
strides=strides,
conv=conv,
norm=norm,
act=self.act)(x)
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(self.num_classes, dtype=self.dtype)(x)
x = jnp.asarray(x, self.dtype)
return x
ResNet18 = partial(ResNet, stage_sizes=[2, 2, 2, 2],
block_cls=ResNetBlock)
ResNet34 = partial(ResNet, stage_sizes=[3, 4, 6, 3],
block_cls=ResNetBlock)
ResNet50 = partial(ResNet, stage_sizes=[3, 4, 6, 3],
block_cls=BottleneckResNetBlock)
ResNet101 = partial(ResNet, stage_sizes=[3, 4, 23, 3],
block_cls=BottleneckResNetBlock)
ResNet152 = partial(ResNet, stage_sizes=[3, 8, 36, 3],
block_cls=BottleneckResNetBlock)
ResNet200 = partial(ResNet, stage_sizes=[3, 24, 36, 3],
block_cls=BottleneckResNetBlock)
ResNet18Local = partial(ResNet, stage_sizes=[2, 2, 2, 2],
block_cls=ResNetBlock, conv=nn.ConvLocal)
# Used for testing only.
_ResNet1 = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock)
_ResNet1Local = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock,
conv=nn.ConvLocal)
| ResNet |
python | encode__django-rest-framework | tests/test_middleware.py | {
"start": 789,
"end": 985
} | class ____(APIView):
def get(self, request):
return Response(data="OK", status=200)
@api_view(['GET'])
def get_func_view(request):
return Response(data="OK", status=200)
| GetAPIView |
python | pytorch__pytorch | torch/fx/passes/infra/pass_base.py | {
"start": 324,
"end": 729
} | class ____(namedtuple("PassResult", ["graph_module", "modified"])):
"""
Result of a pass:
graph_module: The modified graph module
modified: A flag for if the pass has modified the graph module
"""
__slots__ = ()
def __new__(cls, graph_module, modified):
return super().__new__(cls, graph_module, modified)
@compatibility(is_backward_compatible=False)
| PassResult |
python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {
"start": 4117,
"end": 5310
} | class ____(NamedTuple):
groups: list[Group]
memberships: list[Membership]
tracebacks: list[Traceback]
collectives: list[Collective]
ncclcalls: list[NCCLCall]
# TODO: We need to add a schema for the following
types = [
TypeInfo.from_type(t) # type: ignore[type-var]
for t in [Database, NCCLCall, Collective, Traceback, Membership, Group]
if (
isinstance(t, type)
and issubclass(t, tuple)
and hasattr(t, "_fields")
and t is not TypeInfo
)
]
"""
Stacktrace cache
TODO
"""
"""
Collective Matching logic
NOTE: For now, these collectives need to be supported by NCCL,
https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/overview.html.
"""
COLLECTIVES = {
"broadcast",
"_broadcast_oop",
"reduce",
"_reduce_oop",
"all_gather",
"all_reduce",
"_all_gather_base",
"all_gather_into_tensor_coalesced",
"reduce_scatter",
"reduce_scatter_tensor_coalesced",
"_reduce_scatter_base",
"gather",
"scatter",
"all_to_all",
"all_reduce_barrier",
"allreduce_coalesced",
"ALLGATHER_coalesced",
"REDUCE_SCATTER_coalesced",
}
P2P = {
"send",
"recv",
}
| Database |
python | django__django | tests/get_or_create/models.py | {
"start": 525,
"end": 629
} | class ____(models.Model):
person = models.ForeignKey(Person, models.CASCADE, primary_key=True)
| Profile |
python | ray-project__ray | rllib/connectors/module_to_env/module_to_env_pipeline.py | {
"start": 150,
"end": 207
} | class ____(ConnectorPipelineV2):
pass
| ModuleToEnvPipeline |
python | getsentry__sentry | src/sentry/objectstore/endpoints/organization.py | {
"start": 394,
"end": 1513
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"PUT": ApiPublishStatus.EXPERIMENTAL,
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.FOUNDATIONAL_STORAGE
def get(self, request: Request, organization: Organization) -> Response:
if not features.has("organizations:objectstore-endpoint", organization, actor=request.user):
return Response(status=404)
# TODO: implement
return Response(status=200)
def put(self, request: Request, organization: Organization) -> Response:
if not features.has("organizations:objectstore-endpoint", organization, actor=request.user):
return Response(status=404)
# TODO: implement
return Response(status=200)
def delete(self, request: Request, organization: Organization) -> Response:
if not features.has("organizations:objectstore-endpoint", organization, actor=request.user):
return Response(status=404)
# TODO: implement
return Response(status=200)
| OrganizationObjectstoreEndpoint |
python | pytorch__pytorch | torch/distributed/algorithms/_comm_hooks/default_hooks.py | {
"start": 1353,
"end": 7616
} | class ____(DefaultState):
r"""
Stores state needed to perform gradient communication in a lower precision within a communication hook.
Communication hook will cast gradients back to the original
parameter precision specified by ``parameter_type`` (default: torch.float32).
Builds on top of the :class:`DefaultState`.
Args:
parameter_type (torch.dtype): The precision of model's parameters.
Required for a hook to cast gradients back to a parameter's precision.
"""
__slots__ = [
"parameter_type",
]
def __init__(
self,
process_group,
parameter_type=torch.float32,
):
super().__init__(process_group)
self.parameter_type = parameter_type
def _decompress(state: LowPrecisionState, grad: torch.Tensor):
"""
Casts gradients back to full parameter precision so that further computation happens in full precision.
"""
orig_grad_data = grad.data
grad.data = grad.data.to(state.parameter_type)
device_type = ""
try:
if grad.device.type == "privateuse1":
device_type = torch._C._get_privateuse1_backend_name()
else:
device_type = grad.device.type
backend = getattr(torch, device_type)
except AttributeError as e:
raise AttributeError(
f"Device {grad.device} does not have a \
corresponding backend registered as 'torch.device_type'."
) from e
# Don't let this memory get reused until after the transfer.
orig_grad_data.record_stream(backend.current_stream()) # type: ignore[arg-type]
def allreduce_hook(state: DefaultState, grad: torch.Tensor):
r"""
Implement the FSDP communication hook for ``all_reduce`` algorithm and a necessary pre- and post-division of gradients.
Args:
state (DefaultState): State information, configures pre- and post-division factors.
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks.
"""
# Average grad by pre-division factor. Together pre- and post-division factors
# lead to an overall averaging by world_size, required for consistency with PyTorch DDP.
# This is a two-step process to avoid potential underflow and overflow.
if state.gradient_predivide_factor > 1:
grad.div_(state.gradient_predivide_factor)
dist.all_reduce(grad, group=state.process_group)
# Average grad by post-division factor.
if state.gradient_postdivide_factor > 1:
grad.div_(state.gradient_postdivide_factor)
def reduce_scatter_hook(state: DefaultState, grad: torch.Tensor, output: torch.Tensor):
r"""
Implement the FSDP communication hook for ``reduce_scatter`` algorithm.
For sharded FSDP strategies and a necessary pre- and post-division of gradients.
Args:
state (DefaultState): State information, configures pre- and post-division factors.
grad (torch.Tensor): An unsharded gradient for the local batch that needs to be
communicated across ranks.
output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
"""
# Average grad by pre-division factor.
if state.gradient_predivide_factor > 1:
grad.div_(state.gradient_predivide_factor)
dist.reduce_scatter_tensor(output, grad, group=state.process_group)
# Average grad's shard by post-division factor.
if state.gradient_postdivide_factor > 1:
output.div_(state.gradient_postdivide_factor)
def _low_precision_hook(
prec: torch.dtype,
state: LowPrecisionState,
grad: torch.Tensor,
output: torch.Tensor | None,
):
if grad.dtype != prec:
grad.data = grad.data.to(prec)
if output is not None:
if output.dtype != prec:
output.data = output.data.to(prec)
reduce_scatter_hook(state, grad, output)
_decompress(state, output)
else:
allreduce_hook(state, grad)
_decompress(state, grad)
def fp16_compress_hook(
state: LowPrecisionState, grad: torch.Tensor, output: torch.Tensor | None = None
):
r"""
Implement FSDP communication hook for a simple gradient compression approach.
Casts ``grad`` to half-precision floating-point format (``torch.float16``).
It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``)
gradients are averaged by a ``state.gradient_postdivide_factor``.
Once post-division is done, compressed gradients are casted back to parameters' precision.
Args:
state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision.
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
"""
fp16_hook = functools.partial(_low_precision_hook, torch.float16)
return fp16_hook(state, grad, output)
def bf16_compress_hook(
state: LowPrecisionState, grad: torch.Tensor, output: torch.Tensor | None = None
):
r"""
Implement FSDP communication hook for a simple gradient compression approach .
Casts ``grad`` to half-precision floating-point format.
It also averages gradients by ``world_size`` in two steps: first it pre-divides gradients by a
``state.gradient_predivide_factor``, and after a communication step (``all_reduce`` or ``reduce_scatter``)
gradients are averaged by a ``state.gradient_postdivide_factor``.
Once post-division is done, compressed gradients are casted back to parameters' precision.
Args:
state (LowPrecisionState): State information, configures pre- and post-division factors, parameters' precision.
grad (torch.Tensor): A gradient for the local batch that needs to be communicated across ranks in a lower precision.
output (torch.Tensor): Stores a single shard of the gradient after ``reduce_scatter``.
"""
bf16_hook = functools.partial(_low_precision_hook, torch.bfloat16)
return bf16_hook(state, grad, output)
| LowPrecisionState |
python | kamyu104__LeetCode-Solutions | Python/minimize-result-by-adding-parentheses-to-expression.py | {
"start": 64,
"end": 1352
} | class ____(object):
def minimizeResult(self, expression):
"""
:type expression: str
:rtype: str
"""
def stoi(s, i, j):
result = 0
for k in xrange(i, j):
result = result*10+(ord(s[k])-ord('0'))
return result
best = None
min_val = float("inf")
pos = expression.index('+')
left, right = stoi(expression, 0, pos), stoi(expression, pos+1, len(expression))
base1, base2_init = 10**pos, 10**(len(expression)-(pos+1)-1)
for i in xrange(pos):
base2 = base2_init
for j in xrange(pos+1, len(expression)):
a, b = divmod(left, base1)
c, d = divmod(right, base2)
val = max(a, 1)*(b+c)*max(d, 1)
if val < min_val:
min_val = val
best = (i, j)
base2 //= 10
base1 //= 10
return "".join(itertools.chain((expression[i] for i in xrange(best[0])),
'(', (expression[i] for i in xrange(best[0], best[1]+1)), ')',
(expression[i] for i in xrange(best[1]+1, len(expression)))))
# Time: O(n^2)
# Space: O(n)
# brute force
| Solution |
python | plotly__plotly.py | plotly/graph_objs/sankey/_link.py | {
"start": 233,
"end": 28100
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey"
_path_str = "sankey.link"
_valid_props = {
"arrowlen",
"color",
"colorscaledefaults",
"colorscales",
"colorsrc",
"customdata",
"customdatasrc",
"hovercolor",
"hovercolorsrc",
"hoverinfo",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"label",
"labelsrc",
"line",
"source",
"sourcesrc",
"target",
"targetsrc",
"value",
"valuesrc",
}
@property
def arrowlen(self):
"""
Sets the length (in px) of the links arrow, if 0 no arrow will
be drawn.
The 'arrowlen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["arrowlen"]
@arrowlen.setter
def arrowlen(self, val):
self["arrowlen"] = val
@property
def color(self):
"""
Sets the `link` color. It can be a single value, or an array
for specifying color for each `link`. If `link.color` is
omitted, then by default, a translucent grey link will be used.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorscales(self):
"""
The 'colorscales' property is a tuple of instances of
Colorscale that may be specified as:
- A list or tuple of instances of plotly.graph_objs.sankey.link.Colorscale
- A list or tuple of dicts of string/value properties that
will be passed to the Colorscale constructor
Returns
-------
tuple[plotly.graph_objs.sankey.link.Colorscale]
"""
return self["colorscales"]
@colorscales.setter
def colorscales(self, val):
self["colorscales"] = val
@property
def colorscaledefaults(self):
"""
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults), sets the
default property values to use for elements of
sankey.link.colorscales
The 'colorscaledefaults' property is an instance of Colorscale
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Colorscale`
- A dict of string/value properties that will be passed
to the Colorscale constructor
Returns
-------
plotly.graph_objs.sankey.link.Colorscale
"""
return self["colorscaledefaults"]
@colorscaledefaults.setter
def colorscaledefaults(self, val):
self["colorscaledefaults"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def customdata(self):
"""
Assigns extra data to each link.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def hovercolor(self):
"""
Sets the `link` hover color. It can be a single value, or an
array for specifying hover colors for each `link`. If
`link.hovercolor` is omitted, then by default, links will
become slightly more opaque when hovered over.
The 'hovercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovercolor"]
@hovercolor.setter
def hovercolor(self, val):
self["hovercolor"] = val
@property
def hovercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovercolor`.
The 'hovercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovercolorsrc"]
@hovercolorsrc.setter
def hovercolorsrc(self, val):
self["hovercolorsrc"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear when hovering links.
If `none` or `skip` are set, no information is displayed upon
hovering. But, if `none` is set, click and hover events are
still fired.
The 'hoverinfo' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'none', 'skip']
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.sankey.link.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables `value`
and `label`. Anything contained in tag `<extra>` is displayed
in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def label(self):
"""
The shown name of the link.
The 'label' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def labelsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `label`.
The 'labelsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelsrc"]
@labelsrc.setter
def labelsrc(self, val):
self["labelsrc"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.link.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.sankey.link.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def source(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
source node.
The 'source' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
@property
def sourcesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `source`.
The 'sourcesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sourcesrc"]
@sourcesrc.setter
def sourcesrc(self, val):
self["sourcesrc"] = val
@property
def target(self):
"""
An integer number `[0..nodes.length - 1]` that represents the
target node.
The 'target' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["target"]
@target.setter
def target(self, val):
self["target"] = val
@property
def targetsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `target`.
The 'targetsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["targetsrc"]
@targetsrc.setter
def targetsrc(self, val):
self["targetsrc"] = val
@property
def value(self):
"""
A numeric value representing the flow volume value.
The 'value' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def valuesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `value`.
The 'valuesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuesrc"]
@valuesrc.setter
def valuesrc(self, val):
self["valuesrc"] = val
@property
def _prop_descriptions(self):
return """\
arrowlen
Sets the length (in px) of the links arrow, if 0 no
arrow will be drawn.
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hovercolor
Sets the `link` hover color. It can be a single value,
or an array for specifying hover colors for each
`link`. If `link.hovercolor` is omitted, then by
default, links will become slightly more opaque when
hovered over.
hovercolorsrc
Sets the source reference on Chart Studio Cloud for
`hovercolor`.
hoverinfo
Determines which trace information appear when hovering
links. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`. Anything contained in
tag `<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
`label`.
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
`source`.
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
`target`.
value
A numeric value representing the flow volume value.
valuesrc
Sets the source reference on Chart Studio Cloud for
`value`.
"""
def __init__(
self,
arg=None,
arrowlen=None,
color=None,
colorscales=None,
colorscaledefaults=None,
colorsrc=None,
customdata=None,
customdatasrc=None,
hovercolor=None,
hovercolorsrc=None,
hoverinfo=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
label=None,
labelsrc=None,
line=None,
source=None,
sourcesrc=None,
target=None,
targetsrc=None,
value=None,
valuesrc=None,
**kwargs,
):
"""
Construct a new Link object
The links of the Sankey plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Link`
arrowlen
Sets the length (in px) of the links arrow, if 0 no
arrow will be drawn.
color
Sets the `link` color. It can be a single value, or an
array for specifying color for each `link`. If
`link.color` is omitted, then by default, a translucent
grey link will be used.
colorscales
A tuple of
:class:`plotly.graph_objects.sankey.link.Colorscale`
instances or dicts with compatible properties
colorscaledefaults
When used in a template (as
layout.template.data.sankey.link.colorscaledefaults),
sets the default property values to use for elements of
sankey.link.colorscales
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
customdata
Assigns extra data to each link.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
hovercolor
Sets the `link` hover color. It can be a single value,
or an array for specifying hover colors for each
`link`. If `link.hovercolor` is omitted, then by
default, links will become slightly more opaque when
hovered over.
hovercolorsrc
Sets the source reference on Chart Studio Cloud for
`hovercolor`.
hoverinfo
Determines which trace information appear when hovering
links. If `none` or `skip` are set, no information is
displayed upon hovering. But, if `none` is set, click
and hover events are still fired.
hoverlabel
:class:`plotly.graph_objects.sankey.link.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`. Anything contained in
tag `<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
label
The shown name of the link.
labelsrc
Sets the source reference on Chart Studio Cloud for
`label`.
line
:class:`plotly.graph_objects.sankey.link.Line` instance
or dict with compatible properties
source
An integer number `[0..nodes.length - 1]` that
represents the source node.
sourcesrc
Sets the source reference on Chart Studio Cloud for
`source`.
target
An integer number `[0..nodes.length - 1]` that
represents the target node.
targetsrc
Sets the source reference on Chart Studio Cloud for
`target`.
value
A numeric value representing the flow volume value.
valuesrc
Sets the source reference on Chart Studio Cloud for
`value`.
Returns
-------
Link
"""
super().__init__("link")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.Link
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Link`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("arrowlen", arg, arrowlen)
self._set_property("color", arg, color)
self._set_property("colorscales", arg, colorscales)
self._set_property("colorscaledefaults", arg, colorscaledefaults)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("hovercolor", arg, hovercolor)
self._set_property("hovercolorsrc", arg, hovercolorsrc)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("label", arg, label)
self._set_property("labelsrc", arg, labelsrc)
self._set_property("line", arg, line)
self._set_property("source", arg, source)
self._set_property("sourcesrc", arg, sourcesrc)
self._set_property("target", arg, target)
self._set_property("targetsrc", arg, targetsrc)
self._set_property("value", arg, value)
self._set_property("valuesrc", arg, valuesrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Link |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/redshift/resources.py | {
"start": 9056,
"end": 9169
} | class ____(RedshiftClient):
"""This class was used by the function-style Redshift resource."""
| RedshiftResource |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 7434,
"end": 7900
} | class ____:
"""Test pt_PT bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{21}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == PtPtBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{21}", iban[2:])
| TestPtPt |
python | crytic__slither | slither/core/expressions/unary_operation.py | {
"start": 3209,
"end": 4357
} | class ____(Expression):
def __init__(
self,
expression: Union[Literal, Identifier, IndexAccess, TupleExpression],
expression_type: UnaryOperationType,
) -> None:
assert isinstance(expression, Expression)
super().__init__()
self._expression: Expression = expression
self._type: UnaryOperationType = expression_type
if expression_type in [
UnaryOperationType.DELETE,
UnaryOperationType.PLUSPLUS_PRE,
UnaryOperationType.MINUSMINUS_PRE,
UnaryOperationType.PLUSPLUS_POST,
UnaryOperationType.MINUSMINUS_POST,
]:
expression.set_lvalue()
@property
def expression(self) -> Expression:
return self._expression
@property
def type(self) -> UnaryOperationType:
return self._type
@property
def is_prefix(self) -> bool:
return UnaryOperationType.is_prefix(self._type)
def __str__(self) -> str:
if self.is_prefix:
return str(self.type) + " " + str(self._expression)
return str(self._expression) + " " + str(self.type)
| UnaryOperation |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 12316,
"end": 12901
} | class ____(AttributeHandler):
minimum_path_length = 2
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
if path[1] in (
"screen_density",
"screen_dpi",
"screen_height_pixels",
"screen_width_pixels",
):
contexts = event.data.get("contexts", {})
device = contexts.get("device")
if device is None:
device = []
return [device.get(path[1])]
return []
@attribute_registry.register("unreal")
| DeviceAttributeHandler |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 1448,
"end": 1570
} | class ____:
def analyze(self, laparams):
"""Perform the layout analysis."""
return
## LTText
##
| LTItem |
python | google__jax | jax/_src/pallas/pipelining/internal.py | {
"start": 1440,
"end": 1739
} | class ____:
max_in_flight: int
is_async_start: bool
is_async_done: bool
def __post_init__(self):
if self.is_async_start and self.is_async_done:
raise ValueError(
"Async start and async done are mutually exclusive.")
@dataclasses.dataclass(frozen=True)
| SchedulingProperties |
python | ansible__ansible | lib/ansible/utils/encrypt.py | {
"start": 3478,
"end": 7879
} | class ____(BaseHash):
algorithms = {
**BaseHash.algorithms,
'yescrypt': _Algo(crypt_id='y', salt_size=16, implicit_rounds=5, rounds_format='cost', requires_gensalt=True, salt_exact=True),
}
def __init__(self, algorithm: str) -> None:
super(CryptHash, self).__init__(algorithm)
if not HAS_CRYPT:
raise AnsibleError("crypt cannot be used as the 'libxcrypt' library is not installed or is unusable.") from CRYPT_E
if algorithm not in self.algorithms:
raise AnsibleError(f"crypt does not support {self.algorithm!r} algorithm")
self.algo_data = self.algorithms[algorithm]
if self.algo_data.requires_gensalt and not _crypt_facade.has_crypt_gensalt:
raise AnsibleError(f"{self.algorithm!r} algorithm requires libxcrypt")
def hash(self, secret: str, salt: str | None = None, salt_size: int | None = None, rounds: int | None = None, ident: str | None = None) -> str:
rounds = self._rounds(rounds)
ident = self._ident(ident)
if _crypt_facade.has_crypt_gensalt:
saltstring = self._gensalt(ident, rounds, salt, salt_size)
else:
saltstring = self._build_saltstring(ident, rounds, salt, salt_size)
return self._hash(secret, saltstring)
def _validate_salt_size(self, salt_size: int | None) -> int:
if salt_size is not None and not isinstance(salt_size, int):
raise TypeError('salt_size must be an integer')
salt_size = salt_size or self.algo_data.salt_size
if self.algo_data.salt_exact and salt_size != self.algo_data.salt_size:
raise AnsibleError(f"invalid salt size supplied ({salt_size}), expected {self.algo_data.salt_size}")
elif not self.algo_data.salt_exact and salt_size > self.algo_data.salt_size:
raise AnsibleError(f"invalid salt size supplied ({salt_size}), expected at most {self.algo_data.salt_size}")
return salt_size
def _salt(self, salt: str | None, salt_size: int | None) -> str:
salt_size = self._validate_salt_size(salt_size)
ret = salt or random_salt(salt_size)
if not set(ret).issubset(_VALID_SALT_CHARS):
raise AnsibleError("invalid characters in salt")
if self.algo_data.salt_exact and len(ret) != self.algo_data.salt_size:
raise AnsibleError(f"invalid salt size supplied ({len(ret)}), expected {self.algo_data.salt_size}")
elif not self.algo_data.salt_exact and len(ret) > self.algo_data.salt_size:
raise AnsibleError(f"invalid salt size supplied ({len(ret)}), expected at most {self.algo_data.salt_size}")
return ret
def _rounds(self, rounds: int | None) -> int | None:
return rounds or self.algo_data.implicit_rounds
def _ident(self, ident: str | None) -> str | None:
return ident or self.algo_data.crypt_id
def _gensalt(self, ident: str, rounds: int | None, salt: str | None, salt_size: int | None) -> str:
if salt is None:
salt_size = self._validate_salt_size(salt_size)
rbytes = secrets.token_bytes(salt_size)
else:
salt = self._salt(salt, salt_size)
rbytes = to_bytes(salt)
prefix = f'${ident}$'
count = rounds or 0
try:
salt_bytes = _crypt_facade.crypt_gensalt(to_bytes(prefix), count, rbytes)
return to_text(salt_bytes, errors='strict')
except (NotImplementedError, ValueError) as e:
raise AnsibleError(f"Failed to generate salt for {self.algorithm!r} algorithm") from e
def _build_saltstring(self, ident: str, rounds: int | None, salt: str | None, salt_size: int | None) -> str:
salt = self._salt(salt, salt_size)
saltstring = f'${ident}' if ident else ''
if rounds:
if self.algo_data.rounds_format == 'cost':
saltstring += f'${rounds}'
else:
saltstring += f'$rounds={rounds}'
saltstring += f'${salt}'
return saltstring
def _hash(self, secret: str, saltstring: str) -> str:
try:
result = _crypt_facade.crypt(to_bytes(secret), to_bytes(saltstring))
except (OSError, ValueError) as e:
raise AnsibleError(f"crypt does not support {self.algorithm!r} algorithm") from e
return to_text(result, errors='strict')
| CryptHash |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/xent_op_test.py | {
"start": 1271,
"end": 3221
} | class ____(xent_op_test_base.XentOpTestBase):
@test_util.run_deprecated_v1
def testRankTooLarge(self):
for dtype in np.float16, np.float32:
np_features = np.array([[[1., 1., 1., 1.]], [[1., 2., 3.,
4.]]]).astype(dtype)
np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
0.]]]).astype(dtype)
self.assertRaisesRegex(ValueError, "rank 2, but is rank 3",
gen_nn_ops.softmax_cross_entropy_with_logits,
np_features, np_labels)
def testFeaturesBroadcast(self):
np_f = np.array([[1., 2., 3., 4.],
[1., 2., 3., 4.]]).astype(np.float32)
np_l = np.array([[0., 0., 0., 1.],
[0., .5, .5, 0.]]).astype(np.float32)
np_loss, np_gradient = self._npXent(labels=np_l, logits=np_f)
tf_f = constant_op.constant(
np.array([[1., 2., 3., 4.]]).astype(np.float32))
tf_l = constant_op.constant(
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
tf_loss, tf_gradient = gen_nn_ops.softmax_cross_entropy_with_logits(
tf_f, tf_l)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_gradient, tf_gradient)
tf_f = constant_op.constant(np.array([[1.]]).astype(np.float32))
tf_l = constant_op.constant(np.array([[1.], [1.]]).astype(np.float32))
tf_loss, tf_gradient = gen_nn_ops.softmax_cross_entropy_with_logits(
tf_f, tf_l)
self.assertAllClose([0, 0], tf_loss)
self.assertAllCloseAccordingToType([[0], [0]], tf_gradient)
@test_util.run_deprecated_v1
def testNotMatrix(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
[0., 1., 0., 1.])
| XentOpTest |
python | apache__airflow | providers/slack/tests/unit/slack/notifications/test_slack.py | {
"start": 1064,
"end": 6611
} | class ____:
@mock.patch("airflow.providers.slack.notifications.slack.SlackHook")
@pytest.mark.parametrize(
("extra_kwargs", "hook_extra_kwargs"),
[
pytest.param({}, DEFAULT_HOOKS_PARAMETERS, id="default-hook-parameters"),
pytest.param(
{
"base_url": "https://foo.bar",
"timeout": 42,
"proxy": "http://spam.egg",
"retry_handlers": [],
},
{
"base_url": "https://foo.bar",
"timeout": 42,
"proxy": "http://spam.egg",
"retry_handlers": [],
},
id="with-extra-hook-parameters",
),
],
)
def test_slack_notifier(self, mock_slack_hook, create_dag_without_db, extra_kwargs, hook_extra_kwargs):
notifier = send_slack_notification(slack_conn_id="test_conn_id", text="test", **extra_kwargs)
notifier({"dag": create_dag_without_db("test_slack_notifier")})
mock_slack_hook.return_value.call.assert_called_once_with(
"chat.postMessage",
json={
"channel": "#general",
"username": "Airflow",
"text": "test",
"icon_url": "https://raw.githubusercontent.com/apache/airflow/main/airflow-core"
"/src/airflow/ui/public/pin_100.png",
"attachments": "[]",
"blocks": "[]",
"unfurl_links": True,
"unfurl_media": True,
},
)
mock_slack_hook.assert_called_once_with(slack_conn_id="test_conn_id", **hook_extra_kwargs)
@mock.patch("airflow.providers.slack.notifications.slack.SlackHook")
def test_slack_notifier_with_notifier_class(self, mock_slack_hook, create_dag_without_db):
notifier = SlackNotifier(text="test")
notifier({"dag": create_dag_without_db("test_slack_notifier")})
mock_slack_hook.return_value.call.assert_called_once_with(
"chat.postMessage",
json={
"channel": "#general",
"username": "Airflow",
"text": "test",
"icon_url": "https://raw.githubusercontent.com/apache/airflow/main/airflow-core"
"/src/airflow/ui/public/pin_100.png",
"attachments": "[]",
"blocks": "[]",
"unfurl_links": True,
"unfurl_media": True,
},
)
@mock.patch("airflow.providers.slack.notifications.slack.SlackHook")
def test_slack_notifier_templated(self, mock_slack_hook, create_dag_without_db):
notifier = send_slack_notification(
text="test {{ username }}",
channel="#test-{{dag.dag_id}}",
attachments=[{"image_url": "{{ dag.dag_id }}.png"}],
)
context = {"dag": create_dag_without_db("test_slack_notifier")}
notifier(context)
mock_slack_hook.return_value.call.assert_called_once_with(
"chat.postMessage",
json={
"channel": "#test-test_slack_notifier",
"username": "Airflow",
"text": "test Airflow",
"icon_url": "https://raw.githubusercontent.com/apache/airflow/main/airflow-core"
"/src/airflow/ui/public/pin_100.png",
"attachments": '[{"image_url": "test_slack_notifier.png"}]',
"blocks": "[]",
"unfurl_links": True,
"unfurl_media": True,
},
)
@mock.patch("airflow.providers.slack.notifications.slack.SlackHook")
def test_slack_notifier_unfurl_options(self, mock_slack_hook, create_dag_without_db):
notifier = send_slack_notification(
text="test",
unfurl_links=False,
unfurl_media=False,
)
notifier({"dag": create_dag_without_db("test_slack_notifier")})
mock_slack_hook.return_value.call.assert_called_once_with(
"chat.postMessage",
json={
"channel": "#general",
"username": "Airflow",
"text": "test",
"icon_url": "https://raw.githubusercontent.com/apache/airflow/main/airflow-core"
"/src/airflow/ui/public/pin_100.png",
"attachments": "[]",
"blocks": "[]",
"unfurl_links": False,
"unfurl_media": False,
},
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.slack.notifications.slack.SlackHook")
async def test_async_slack_notifier(self, mock_slack_hook):
mock_slack_hook.return_value.async_call = mock.AsyncMock()
notifier = send_slack_notification(
text="test",
unfurl_links=False,
unfurl_media=False,
)
await notifier.async_notify({})
mock_slack_hook.return_value.async_call.assert_called_once_with(
"chat.postMessage",
json={
"channel": "#general",
"username": "Airflow",
"text": "test",
"icon_url": "https://raw.githubusercontent.com/apache/airflow/main/airflow-core"
"/src/airflow/ui/public/pin_100.png",
"attachments": "[]",
"blocks": "[]",
"unfurl_links": False,
"unfurl_media": False,
},
)
| TestSlackNotifier |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/serializer.py | {
"start": 786,
"end": 8557
} | class ____:
# 'id' and 3+ numbers, but not 000
ANCHOR_TEMPLATE = 'id%03d'
ANCHOR_RE = RegExp('id(?!000$)\\d{3,}')
def __init__(
self,
encoding=None,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
dumper=None,
):
# type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA
self.dumper = dumper
if self.dumper is not None:
self.dumper._serializer = self
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
if isinstance(version, str):
self.use_version = tuple(map(int, version.split('.')))
else:
self.use_version = version # type: ignore
self.use_tags = tags
self.serialized_nodes = {} # type: Dict[Any, Any]
self.anchors = {} # type: Dict[Any, Any]
self.last_anchor_id = 0
self.closed = None # type: Optional[bool]
self._templated_id = None
@property
def emitter(self):
# type: () -> Any
if hasattr(self.dumper, 'typ'):
return self.dumper.emitter
return self.dumper._emitter
@property
def resolver(self):
# type: () -> Any
if hasattr(self.dumper, 'typ'):
self.dumper.resolver
return self.dumper._resolver
def open(self):
# type: () -> None
if self.closed is None:
self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError('serializer is closed')
else:
raise SerializerError('serializer is already opened')
def close(self):
# type: () -> None
if self.closed is None:
raise SerializerError('serializer is not opened')
elif not self.closed:
self.emitter.emit(StreamEndEvent())
self.closed = True
# def __del__(self):
# self.close()
def serialize(self, node):
# type: (Any) -> None
if dbg(DBG_NODE):
nprint('Serializing nodes')
node.dump()
if self.closed is None:
raise SerializerError('serializer is not opened')
elif self.closed:
raise SerializerError('serializer is closed')
self.emitter.emit(
DocumentStartEvent(
explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
)
)
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
# type: (Any) -> None
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
anchor = None
try:
if node.anchor.always_dump:
anchor = node.anchor.value
except: # NOQA
pass
self.anchors[node] = anchor
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
# type: (Any) -> Any
try:
anchor = node.anchor.value
except: # NOQA
anchor = None
if anchor is None:
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
return anchor
def serialize_node(self, node, parent, index):
# type: (Any, Any, Any) -> None
alias = self.anchors[node]
if node in self.serialized_nodes:
node_style = getattr(node, 'style', None)
if node_style != '?':
node_style = None
self.emitter.emit(AliasEvent(alias, style=node_style))
else:
self.serialized_nodes[node] = True
self.resolver.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
# here check if the node.tag equals the one that would result from parsing
# if not equal quoting is necessary for strings
detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
implicit = (
(node.tag == detected_tag),
(node.tag == default_tag),
node.tag.startswith('tag:yaml.org,2002:'),
)
self.emitter.emit(
ScalarEvent(
alias,
node.tag,
implicit,
node.value,
style=node.style,
comment=node.comment,
)
)
elif isinstance(node, SequenceNode):
implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
comment = node.comment
end_comment = None
seq_comment = None
if node.flow_style is True:
if comment: # eol comment on flow style sequence
seq_comment = comment[0]
# comment[0] = None
if comment and len(comment) > 2:
end_comment = comment[2]
else:
end_comment = None
self.emitter.emit(
SequenceStartEvent(
alias,
node.tag,
implicit,
flow_style=node.flow_style,
comment=node.comment,
)
)
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
elif isinstance(node, MappingNode):
implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
comment = node.comment
end_comment = None
map_comment = None
if node.flow_style is True:
if comment: # eol comment on flow style sequence
map_comment = comment[0]
# comment[0] = None
if comment and len(comment) > 2:
end_comment = comment[2]
self.emitter.emit(
MappingStartEvent(
alias,
node.tag,
implicit,
flow_style=node.flow_style,
comment=node.comment,
nr_items=len(node.value),
)
)
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
self.resolver.ascend_resolver()
def templated_id(s):
# type: (Text) -> Any
return Serializer.ANCHOR_RE.match(s)
| Serializer |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/routers/test_builder_ingress.py | {
"start": 6273,
"end": 12640
} | class ____:
@pytest.fixture
def llm_config(self):
"""Basic LLMConfig for testing."""
return LLMConfig(
model_loading_config=ModelLoadingConfig(
model_id="test-model", model_source="test-source"
)
)
def test_build_openai_app(
self, get_llm_serve_args, shutdown_ray_and_serve, disable_placement_bundles
):
"""Test `build_openai_app` can build app and run it with Serve."""
app = build_openai_app(
get_llm_serve_args,
)
assert isinstance(app, serve.Application)
serve.run(app)
def test_build_openai_app_with_config(
self,
serve_config_separate_model_config_files,
shutdown_ray_and_serve,
disable_placement_bundles,
):
"""Test `build_openai_app` can be used in serve config."""
def deployments_healthy():
status_response = subprocess.check_output(["serve", "status"])
print("[TEST] Status response: ", status_response)
applications = extract_applications_from_output(status_response)
if "llm-endpoint" not in applications:
print("[TEST] Application 'llm-endpoint' not found.")
return False
llm_endpoint_status = applications["llm-endpoint"]
if len(llm_endpoint_status["deployments"]) != 2:
print(
f"[TEST] Expected 2 deployments, found {len(llm_endpoint_status['deployments'])}"
)
return False
deployment_status = llm_endpoint_status["deployments"].values()
if not all([status["status"] == "HEALTHY" for status in deployment_status]):
print(f"[TEST] Not all deployments healthy: {deployment_status}")
return False
print("[TEST] All deployments healthy.")
return True
p = subprocess.Popen(["serve", "run", serve_config_separate_model_config_files])
wait_for_condition(deployments_healthy, timeout=60, retry_interval_ms=1000)
p.send_signal(signal.SIGINT) # Equivalent to ctrl-C
p.wait()
def test_router_built_with_autoscaling_configs(self, disable_placement_bundles):
"""Test that the router is built with the correct autoscaling configs that
will scale.
"""
llm_config_no_autoscaling_configured = LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="model_id_1"),
accelerator_type="L4",
)
llm_config_autoscaling_default = LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="model_id_2"),
accelerator_type="L4",
deployment_config={"autoscaling_config": AutoscalingConfig()},
)
llm_config_autoscaling_non_default = LLMConfig(
model_loading_config=ModelLoadingConfig(model_id="model_id_3"),
accelerator_type="L4",
deployment_config={
"autoscaling_config": AutoscalingConfig(
min_replicas=2,
initial_replicas=3,
max_replicas=4,
)
},
)
app = build_openai_app(
LLMServingArgs(
llm_configs=[
llm_config_no_autoscaling_configured,
llm_config_autoscaling_default,
llm_config_autoscaling_non_default,
],
ingress_deployment_config={
"autoscaling_config": {
"min_replicas": 8,
"initial_replicas": 10,
"max_replicas": 12,
"target_ongoing_requests": 10,
}
},
)
)
router_autoscaling_config = (
app._bound_deployment._deployment_config.autoscaling_config
)
assert router_autoscaling_config.min_replicas == 8 # (1 + 1 + 2) * 2
assert router_autoscaling_config.initial_replicas == 10 # (1 + 1 + 3) * 2
assert router_autoscaling_config.max_replicas == 12 # (1 + 1 + 4) * 2
assert router_autoscaling_config.target_ongoing_requests == 10
def test_ingress_deployment_config_merging(
self, llm_config, disable_placement_bundles
):
"""Test that ingress_deployment_config is properly merged with default options.
This test ensures that deep_merge_dicts return value is properly assigned
and that nested dictionaries are properly deep-merged without losing default values.
"""
# Build app with custom ingress deployment config including nested options
app = build_openai_app(
dict(
llm_configs=[llm_config],
ingress_deployment_config={
"num_replicas": 3,
"ray_actor_options": {
"num_cpus": 4,
"memory": 1024,
},
"max_ongoing_requests": 200, # Override default
},
)
)
# Verify the custom config was applied
deployment = app._bound_deployment
assert deployment._deployment_config.num_replicas == 3
assert deployment.ray_actor_options["num_cpus"] == 4
assert deployment.ray_actor_options["memory"] == 1024
assert deployment._deployment_config.max_ongoing_requests == 200
def extract_applications_from_output(output: bytes) -> dict:
"""
Extracts the 'applications' block from mixed output and returns it as a dict.
"""
# 1. Decode bytes to string
text = output.decode("utf-8", errors="ignore")
# 2. Regex to find the 'applications:' block and its indented content
# This matches 'applications:' and all following lines that are indented (YAML block)
match = re.search(r"(^applications:\n(?:^(?: {2,}|\t).*\n?)+)", text, re.MULTILINE)
if not match:
raise ValueError("Could not find 'applications:' block in output.")
applications_block = match.group(1)
# 3. Parse the YAML block
applications_dict = yaml.safe_load(applications_block)
return applications_dict["applications"]
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestBuildOpenaiApp |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/rpc_test.py | {
"start": 4485,
"end": 9608
} | class ____:
def __init__(self, a, delay=False):
self.a = a
# delay initialization to simulate errors if specified
if delay:
time.sleep(2)
def my_instance_method(self, b):
return self.a + b
@classmethod
def my_class_method(cls, d, e):
return d + e
@staticmethod
def my_static_method(f):
return f > 10
def increment_value(self, increment):
self.a += increment
def get_value(self):
return self.a
def my_slow_method(self, my_tensor_arg):
time.sleep(5)
return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
i = [[0, 1, 1], [2, 0, 2]]
v = [3, 4, 5]
tensor = torch.sparse_coo_tensor(i, v, (2, 3))
if coalesce:
tensor = tensor.coalesce()
return tensor
def build_complex_tensors():
a = torch.ones(3, 3)
b = [a, a]
c = [b, b]
d = [a, b]
e = {a: d}
return [a, b, c, d, e]
def non_cont_test(t_view, t_cont):
if t_view.is_contiguous():
raise Exception("t_view is contiguous!") # noqa: TRY002
if not t_cont.is_contiguous():
raise Exception("t_cont is not contiguous!") # noqa: TRY002
if not torch.equal(t_view, t_cont):
raise Exception("t_view is not equal to t_cont!") # noqa: TRY002
return t_view
def my_function(a, b, c):
return a + b + c
def my_tensor_function(a, b):
return a + b
def my_container_sum(a):
result = a[0]
for tensor in a[1:]:
result += tensor
return result
def my_sleep_func(seconds=1):
time.sleep(seconds)
return torch.mul(torch.tensor(1), torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
res = list_input[0]
for t in list_input:
res += t
for v in dict_input.values():
res += v
complex_tensors = tensor_class_input.tensors
return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
time.sleep(seconds)
return a + b
def identity(a):
return a
def no_result():
print("do nothing")
def raise_or_inc(value):
if value.numel() == 2:
raise ValueError("Expected error")
return value + 1
def nested_rpc(dst):
return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def nested_rpc_sparse(dst):
return rpc.rpc_sync(
dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())
)
def multi_layer_nested_async_rpc(dst, world_size, ttl):
# this method returns immediately without blocking the callee, but will
# generate additional requests.
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
rpc.rpc_async(
current_dst,
multi_layer_nested_async_rpc,
args=(next_dst, world_size, ttl - 1),
)
return 0
def nested_rref(dst):
return (
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
)
def nested_rref_sparse(dst):
return (
rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())),
rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())),
)
def nested_remote(dst):
rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
return rref.to_here()
def nested_remote_sparse(dst):
rref = rpc.remote(
dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor())
)
return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
if ttl > 0:
current_dst = worker_name(dst)
next_dst = (dst + 1) % world_size
ret_rref = rpc.remote(
current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
)
return [ret_rref]
else:
return rref.to_here()
def rpc_return_rref(dst):
return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
return 0
def heavy_rpc(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
def heavy_rpc_sparse(tensor):
for i in range(1, 100):
tensor *= i
tensor = tensor / (i + 1)
return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
for i in range(1, 100):
tensor *= i
tensor /= i + 1
return 0
@torch.jit.script
def my_script_func(tensor):
return torch.add(tensor, tensor)
expected_err = "Expected error"
# Note that it needs to inherit from Exception, not BaseException. See comment
# in rpc/internal.py
| MyClass |
python | huggingface__transformers | src/transformers/models/roberta/modular_roberta.py | {
"start": 5846,
"end": 5904
} | class ____(BertLayer):
pass
@auto_docstring
| RobertaLayer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self2.py | {
"start": 3331,
"end": 4006
} | class ____:
scale: float = 1.0
def set_scale(self, scale: float) -> ReturnConcreteShape:
return ReturnConcreteShape()
def accepts_shape(shape: ShapeProtocol) -> None:
y = shape.set_scale(0.5)
reveal_type(y)
def main(
return_self_shape: ReturnSelf,
return_concrete_shape: ReturnConcreteShape,
bad_return_type: BadReturnType,
return_different_class: ReturnDifferentClass,
) -> None:
accepts_shape(return_self_shape)
accepts_shape(return_concrete_shape)
# This should generate an error.
accepts_shape(bad_return_type)
# This should generate an error.
accepts_shape(return_different_class)
| ReturnDifferentClass |
python | joke2k__faker | faker/providers/bank/es_MX/__init__.py | {
"start": 767,
"end": 7785
} | class ____(BankProvider):
"""Bank provider for ``es_MX`` locale."""
banks: Tuple[str, ...] = (
"ABC Capital, S.A. I.B.M.",
"Acciones y Valores Banamex, S.A. de C.V., Casa de Bolsa",
"Actinver Casa de Bolsa, S.A. de C.V.",
"Akala, S.A. de C.V., Sociedad Financiera Popular",
"American Express Bank (México), S.A.",
"AXA Seguros, S.A. De C.V.",
"B y B Casa de Cambio, S.A. de C.V.",
"Banca Afirme, S.A.",
"Banca Mifel, S.A.",
"Banco Actinver, S.A.",
"Banco Ahorro Famsa, S.A.",
"Banco Autofin México, S.A.",
"Banco Azteca, S.A.",
"Banco BASE, S.A. de I.B.M.",
"Banco Compartamos, S.A.",
"Banco Credit Suisse (México), S.A.",
"Banco del Ahorro Nacional y Servicios Financieros, S.N.C.",
"Banco del Bajío, S.A.",
"Banco Inbursa, S.A.",
"Banco Inmobiliario Mexicano, S.A., Institución de Banca Múltiple",
"Banco Interacciones, S.A.",
"Banco Invex, S.A.",
"Banco J.P. Morgan, S.A.",
"Banco Mercantil del Norte, S.A.",
"Banco Monex, S.A.",
"Banco Multiva, S.A.",
"Banco Nacional de Comercio Exterior",
"Banco Nacional de México, S.A.",
"Banco Nacional de Obras y Servicios Públicos",
"Banco Nacional del Ejército, Fuerza Aérea y Armada",
"Banco PagaTodo S.A., Institución de Banca Múltiple",
"Banco Regional de Monterrey, S.A.",
"Banco Sabadell, S.A. I.B.M.",
"Banco Santander, S.A.",
"Banco Ve por Mas, S.A.",
"Banco Wal Mart de México Adelante, S.A.",
"BanCoppel, S.A.",
"Bank of America México, S.A.",
"Bank of Tokyo-Mitsubishi UFJ (México), S.A.",
"Bankaool, S.A., Institución de Banca Múltiple",
"Bansi, S.A.",
"Barclays Bank México, S.A.",
"BBVA Bancomer, S.A.",
"Bulltick Casa de Bolsa, S.A. de C.V.",
"Caja Popular Mexicana, S.C. de A.P. de R.L. De C.V.",
"Casa de Bolsa Finamex, S.A. de C.V.",
"Casa de Cambio Tíber, S.A. de C.V.",
"CI Casa de Bolsa, S.A. de C.V.",
"CLS Bank International",
"Consubanco, S.A.",
"Consultoría Internacional Banco, S.A.",
"Consultoría Internacional Casa de Cambio, S.A. de C.V.",
"Deutsche Bank México, S.A.",
"Deutsche Securities, S.A. de C.V.",
"Estructuradores del Mercado de Valores Casa de Bolsa, S.A. de C.V.",
"Evercore Casa de Bolsa, S.A. de C.V.",
"Financiera Nacional De Desarrollo Agropecuario, Rural, F y P.",
"Fincomún, Servicios Financieros Comunitarios, S.A. de C.V.",
"GBM Grupo Bursátil Mexicano, S.A. de C.V.",
"GE Money Bank, S.A.",
"HDI Seguros, S.A. de C.V.",
"Hipotecaria su Casita, S.A. de C.V.",
"HSBC México, S.A.",
"Industrial and Commercial Bank of China, S.A., Institución de Banca Múltiple",
"ING Bank (México), S.A.",
"Inter Banco, S.A.",
"Intercam Casa de Bolsa, S.A. de C.V.",
"Intercam Casa de Cambio, S.A. de C.V.",
"Inversora Bursátil, S.A. de C.V.",
"IXE Banco, S.A.",
"J.P. Morgan Casa de Bolsa, S.A. de C.V.",
"J.P. SOFIEXPRESS, S.A. de C.V., S.F.P.",
"Kuspit Casa de Bolsa, S.A. de C.V.",
"Libertad Servicios Financieros, S.A. De C.V.",
"MAPFRE Tepeyac S.A.",
"Masari Casa de Bolsa, S.A.",
"Merrill Lynch México, S.A. de C.V., Casa de Bolsa",
"Monex Casa de Bolsa, S.A. de C.V.",
"Multivalores Casa de Bolsa, S.A. de C.V. Multiva Gpo. Fin.",
"Nacional Financiera, S.N.C.",
"Opciones Empresariales Del Noreste, S.A. DE C.V.",
"OPERADORA ACTINVER, S.A. DE C.V.",
"Operadora De Pagos Móviles De México, S.A. De C.V.",
"Operadora de Recursos Reforma, S.A. de C.V.",
"OrderExpress Casa de Cambio , S.A. de C.V. AAC",
"Profuturo G.N.P., S.A. de C.V.",
"Scotiabank Inverlat, S.A.",
"SD. INDEVAL, S.A. de C.V.",
"Seguros Monterrey New York Life, S.A de C.V.",
"Sistema de Transferencias y Pagos STP, S.A. de C.V., SOFOM E.N.R.",
"Skandia Operadora S.A. de C.V.",
"Skandia Vida S.A. de C.V.",
"Sociedad Hipotecaria Federal, S.N.C.",
"Solución Asea, S.A. de C.V., Sociedad Financiera Popular",
"Sterling Casa de Cambio, S.A. de C.V.",
"Telecomunicaciones de México",
"The Royal Bank of Scotland México, S.A.",
"UBS Banco, S.A.",
"UNAGRA, S.A. de C.V., S.F.P.",
"Única Casa de Cambio, S.A. de C.V.",
"Valores Mexicanos Casa de Bolsa, S.A. de C.V.",
"Valué, S.A. de C.V., Casa de Bolsa",
"Vector Casa de Bolsa, S.A. de C.V.",
"Volkswagen Bank S.A. Institución de Banca Múltiple",
"Zúrich Compañía de Seguros, S.A.",
"Zúrich Vida, Compañía de Seguros, S.A.",
)
bank_codes: Tuple[int, ...] = (
2,
6,
9,
12,
14,
19,
21,
22,
30,
32,
36,
37,
42,
44,
58,
59,
60,
62,
72,
102,
103,
106,
108,
110,
112,
113,
116,
124,
126,
127,
128,
129,
130,
131,
132,
133,
134,
135,
136,
137,
138,
139,
140,
141,
143,
145,
147,
148,
150,
155,
156,
166,
168,
600,
601,
602,
604,
605,
606,
607,
608,
610,
611,
613,
614,
615,
616,
617,
618,
619,
620,
621,
622,
623,
624,
626,
627,
628,
629,
630,
631,
632,
633,
634,
636,
637,
638,
640,
642,
646,
647,
648,
649,
651,
652,
653,
655,
656,
659,
670,
674,
677,
679,
684,
901,
902,
)
def clabe(self, bank_code: Optional[int] = None) -> str:
"""Generate a mexican bank account CLABE.
Sources:
- https://en.wikipedia.org/wiki/CLABE
:return: A fake CLABE number.
:sample:
:sample: bank_code=2
"""
bank = bank_code or self.random_element(self.bank_codes)
city = self.random_int(0, 999)
branch = self.random_int(0, 9999)
account = self.random_int(0, 9999999)
result = f"{bank:03d}{city:03d}{branch:04d}{account:07d}"
control_digit = get_clabe_control_digit(result)
return result + str(control_digit)
| Provider |
python | ansible__ansible | lib/ansible/plugins/strategy/__init__.py | {
"start": 2464,
"end": 8355
} | class ____:
pass
_sentinel = StrategySentinel()
if t.TYPE_CHECKING:
from ansible.inventory.host import Host
def _get_item_vars(result, task):
item_vars = {}
if task.loop or task.loop_with:
loop_var = result.get('ansible_loop_var', 'item')
index_var = result.get('ansible_index_var')
if loop_var in result:
item_vars[loop_var] = result[loop_var]
if index_var and index_var in result:
item_vars[index_var] = result[index_var]
if '_ansible_item_label' in result:
item_vars['_ansible_item_label'] = result['_ansible_item_label']
if 'ansible_loop' in result:
item_vars['ansible_loop'] = result['ansible_loop']
return item_vars
def results_thread_main(strategy: StrategyBase) -> None:
value: object
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, DisplaySend):
dmethod = getattr(display, result.method)
dmethod(*result.args, **result.kwargs)
elif isinstance(result, CallbackSend):
task_result = strategy._convert_wire_task_result_to_raw(result.wire_task_result)
strategy._tqm.send_callback(result.method_name, task_result)
elif isinstance(result, _WireTaskResult):
result = strategy._convert_wire_task_result_to_raw(result)
with strategy._results_lock:
strategy._results.append(result)
elif isinstance(result, PromptSend):
try:
value = display.prompt_until(
result.prompt,
private=result.private,
seconds=result.seconds,
complete_input=result.complete_input,
interrupt_input=result.interrupt_input,
)
except AnsibleError as e:
value = e
except BaseException as e:
# relay unexpected errors so bugs in display are reported and don't cause workers to hang
try:
raise AnsibleError(f"{e}") from e
except AnsibleError as e:
value = e
strategy._workers[result.worker_id].worker_queue.put(value)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (OSError, EOFError):
break
except queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator: PlayIterator, one_pass: bool = False, max_passes: int | None = None) -> list[_RawTaskResult]:
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator.host_states.copy()
results: list[_RawTaskResult] = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results: list[_RawTaskResult] = []
for result in results:
task = result.task
host = result.host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
if task.run_once and iterator._play.strategy in add_internal_fqcns(('linear',)) and result.is_failed():
for host_name, state in prev_host_states.items():
if host_name == host.name:
continue
iterator.set_state_for_host(host_name, state)
iterator._play._removed_hosts.remove(host_name)
iterator.set_state_for_host(host.name, prev_host_state)
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
| StrategySentinel |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_automations/client.py | {
"start": 351,
"end": 5561
} | class ____(BaseClient):
def create_automation(self, automation: "AutomationCore") -> "UUID":
"""Creates an automation in Prefect Cloud."""
response = self.request(
"POST",
"/automations/",
json=automation.model_dump(mode="json"),
)
from uuid import UUID
return UUID(response.json()["id"])
def update_automation(
self, automation_id: "UUID", automation: "AutomationCore"
) -> None:
"""Updates an automation in Prefect Cloud."""
response = self.request(
"PUT",
"/automations/{id}",
path_params={"id": automation_id},
json=automation.model_dump(mode="json", exclude_unset=True),
)
response.raise_for_status()
def read_automations(self) -> list["Automation"]:
response = self.request("POST", "/automations/filter")
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
def find_automation(self, id_or_name: "str | UUID") -> "Automation | None":
from uuid import UUID
if isinstance(id_or_name, str):
name = id_or_name
try:
id = UUID(id_or_name)
except ValueError:
id = None
else:
id = id_or_name
name = str(id)
if id:
try:
automation = self.read_automation(id)
return automation
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
automations = self.read_automations()
# Look for it by an exact name
for automation in automations:
if automation.name == name:
return automation
# Look for it by a case-insensitive name
for automation in automations:
if automation.name.lower() == name.lower():
return automation
return None
def read_automation(self, automation_id: "UUID | str") -> "Automation | None":
response = self.request(
"GET", "/automations/{id}", path_params={"id": automation_id}
)
if response.status_code == 404:
return None
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate(response.json())
def read_automations_by_name(self, name: str) -> list["Automation"]:
"""
Query the Prefect API for an automation by name. Only automations matching the provided name will be returned.
Args:
name: the name of the automation to query
Returns:
a list of Automation model representations of the automations
"""
from prefect.client.schemas.sorting import AutomationSort
from prefect.events.filters import (
AutomationFilter,
AutomationFilterName,
)
automation_filter = AutomationFilter(name=AutomationFilterName(any_=[name]))
response = self.request(
"POST",
"/automations/filter",
json={
"sort": AutomationSort.UPDATED_DESC,
"automations": automation_filter.model_dump(mode="json")
if automation_filter
else None,
},
)
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
def pause_automation(self, automation_id: "UUID") -> None:
response = self.request(
"PATCH",
"/automations/{id}",
path_params={"id": automation_id},
json={"enabled": False},
)
response.raise_for_status()
def resume_automation(self, automation_id: "UUID") -> None:
response = self.request(
"PATCH",
"/automations/{id}",
path_params={"id": automation_id},
json={"enabled": True},
)
response.raise_for_status()
def delete_automation(self, automation_id: "UUID") -> None:
response = self.request(
"DELETE",
"/automations/{id}",
path_params={"id": automation_id},
)
if response.status_code == 404:
return
response.raise_for_status()
def read_resource_related_automations(self, resource_id: str) -> list["Automation"]:
response = self.request(
"GET",
"/automations/related-to/{resource_id}",
path_params={"resource_id": resource_id},
)
response.raise_for_status()
from prefect.events.schemas.automations import Automation
return Automation.model_validate_list(response.json())
def delete_resource_owned_automations(self, resource_id: str) -> None:
self.request(
"DELETE",
"/automations/owned-by/{resource_id}",
path_params={"resource_id": resource_id},
)
| AutomationClient |
python | PyCQA__pylint | pylint/extensions/mccabe.py | {
"start": 1285,
"end": 1490
} | class ____(Mccabe_PathGraph): # type: ignore[misc]
def __init__(self, node: _SubGraphNodes | nodes.FunctionDef):
super().__init__(name="", entity="", lineno=1)
self.root = node
| PathGraph |
python | gevent__gevent | src/gevent/tests/known_failures.py | {
"start": 4454,
"end": 4498
} | class ____(_Action):
__slots__ = ()
| Failing |
python | kamyu104__LeetCode-Solutions | Python/time-taken-to-mark-all-nodes.py | {
"start": 1717,
"end": 2778
} | class ____(object):
def timeTaken(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
def dfs1(u, p):
for v in adj[u]:
if v == p:
continue
dfs1(v, u)
curr = [(1+int(v%2 == 0))+dp[v][0][0], v]
for i in xrange(len(dp[u])):
if curr > dp[u][i]:
curr, dp[u][i] = dp[u][i], curr
def dfs2(u, p, curr):
result[u] = max(dp[u][0][0], curr)
for v in adj[u]:
if v == p:
continue
dfs2(v, u, (1+int(u%2 == 0))+max((dp[u][0][0] if dp[u][0][1] != v else dp[u][1][0]), curr))
adj = [[] for _ in xrange(len(edges)+1)]
for u, v, in edges:
adj[u].append(v)
adj[v].append(u)
dp = [[[0, -1] for _ in xrange(2)] for _ in xrange(len(edges)+1)]
dfs1(0, -1)
result = [0]*(len(edges)+1)
dfs2(0, -1, 0)
return result
| Solution2 |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/tags.py | {
"start": 1529,
"end": 2642
} | class ____(GenericBase):
"""Tag aggregate condition class."""
@staticmethod
def visit_eq(expression_name: str, value: str) -> Condition:
return Condition(_match_key_value_exact(expression_name, value), Op.EQ, 1)
@staticmethod
def visit_neq(expression_name: str, value: str) -> Condition:
return Condition(_match_key_value_exact(expression_name, value), Op.EQ, 0)
@staticmethod
def visit_in(expression_name: str, value: list[str]) -> Condition:
return Condition(_match_key_values_exact(expression_name, value), Op.EQ, 1)
@staticmethod
def visit_not_in(expression_name: str, value: list[str]) -> Condition:
return Condition(_match_key_values_exact(expression_name, value), Op.EQ, 0)
@staticmethod
def visit_match(expression_name: str, value: str) -> Condition:
return Condition(_match_key_value_wildcard(expression_name, value), Op.EQ, 1)
@staticmethod
def visit_not_match(expression_name: str, value: str) -> Condition:
return Condition(_match_key_value_wildcard(expression_name, value), Op.EQ, 0)
| TagAggregate |
python | gevent__gevent | src/gevent/_waiter.py | {
"start": 6109,
"end": 7387
} | class ____(Waiter):
"""
An internal extension of Waiter that can be used if multiple objects
must be waited on, and there is a chance that in between waits greenlets
might be switched out. All greenlets that switch to this waiter
will have their value returned.
This does not handle exceptions or throw methods.
"""
__slots__ = ['_values']
def __init__(self, hub=None):
Waiter.__init__(self, hub)
# we typically expect a relatively small number of these to be outstanding.
# since we pop from the left, a deque might be slightly
# more efficient, but since we're in the hub we avoid imports if
# we can help it to better support monkey-patching, and delaying the import
# here can be impractical (see https://github.com/gevent/gevent/issues/652)
self._values = []
def switch(self, value):
self._values.append(value)
Waiter.switch(self, True)
def get(self):
if not self._values:
Waiter.get(self)
Waiter.clear(self)
return self._values.pop(0)
def _init():
greenlet_init() # pylint:disable=undefined-variable
_init()
from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent.__waiter')
| MultipleWaiter |
python | wandb__wandb | wandb/vendor/pygments/lexers/hdl.py | {
"start": 6497,
"end": 14692
} | class ____(RegexLexer):
"""
Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
1800-2009 standard.
.. versionadded:: 1.5
"""
name = 'systemverilog'
aliases = ['systemverilog', 'sv']
filenames = ['*.sv', '*.svh']
mimetypes = ['text/x-systemverilog']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
(r'^\s*`define', Comment.Preproc, 'macro'),
(r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
(r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'[{}#@]', Punctuation),
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
(r'([0-9]+)|(\'b)[01]+', Number.Bin),
(r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
(r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
(r'\'[01xz]', Number),
(r'\d+[Ll]?', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;\']', Punctuation),
(r'`[a-zA-Z_]\w*', Name.Constant),
(words((
'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch',
'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins',
'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez',
'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint',
'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign',
'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase',
'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate',
'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive',
'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable',
'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern',
'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin',
'function', 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone',
'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include',
'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface',
'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library',
'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium',
'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled',
'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter',
'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected',
'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent',
'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime',
'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime',
's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal',
'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static',
'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1',
'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout',
'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0',
'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0',
'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored',
'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while',
'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'),
Keyword),
(words((
'`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype',
'`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif',
'`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma',
'`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'),
suffix=r'\b'),
Comment.Preproc),
(words((
'$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile',
'$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports',
'$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff',
'$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb',
'$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc',
'$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro',
'$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh',
'$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo',
'$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
'$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind',
'$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo',
'$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc',
'$value$plusargs', '$write', '$writeb', '$writeh', '$writememb',
'$writememh', '$writeo'), suffix=r'\b'),
Name.Builtin),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(words((
'byte', 'shortint', 'int', 'longint', 'integer', 'time',
'bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand',
'trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo'
'shortreal', 'real', 'realtime'), suffix=r'\b'),
Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'\$?[a-zA-Z_]\w*', Name),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'import': [
(r'[\w:]+\*?', Name.Namespace, '#pop')
]
}
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
# Convention: mark all upper case names as constants
if token is Name:
if value.isupper():
token = Name.Constant
yield index, token, value
| SystemVerilogLexer |
python | weaviate__weaviate-python-client | weaviate/collections/classes/tenants.py | {
"start": 3906,
"end": 4476
} | class ____(str, Enum):
"""TenantActivityStatus class used to describe the activity status of a tenant to create in Weaviate.
Attributes:
ACTIVE: The tenant is fully active and can be used.
INACTIVE: The tenant is not active, files stored locally.
HOT: DEPRECATED, please use ACTIVE. The tenant is fully active and can be used.
COLD: DEPRECATED, please use INACTIVE. The tenant is not active, files stored locally.
"""
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
HOT = "HOT"
COLD = "COLD"
| TenantCreateActivityStatus |
python | doocs__leetcode | lcp/LCP 11. 期望个数统计/Solution.py | {
"start": 0,
"end": 102
} | class ____:
def expectNumber(self, scores: List[int]) -> int:
return len(set(scores))
| Solution |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 14496,
"end": 14637
} | class ____(_TestDSTIBase):
def setup_method(self):
self.rdt = np.float64
self.dec = 12
self.type = 1
| TestDSTIDouble |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 28366,
"end": 29170
} | class ____(DiagnosticPipError):
"""Raised when the dependency resolver exceeds the maximum recursion depth."""
reference = "resolution-too-deep"
def __init__(self) -> None:
super().__init__(
message="Dependency resolution exceeded maximum depth",
context=(
"Pip cannot resolve the current dependencies as the dependency graph "
"is too complex for pip to solve efficiently."
),
hint_stmt=(
"Try adding lower bounds to constrain your dependencies, "
"for example: 'package>=2.0.0' instead of just 'package'. "
),
link="https://pip.pypa.io/en/stable/topics/dependency-resolution/#handling-resolution-too-deep-errors",
)
| ResolutionTooDeepError |
python | donnemartin__system-design-primer | solutions/system_design/social_graph/social_graph_snippets.py | {
"start": 729,
"end": 863
} | class ____(object):
def __init__(self, id, name):
self.id = id
self.name = name
self.friend_ids = []
| Person |
python | PrefectHQ__prefect | src/integrations/prefect-aws/tests/test_secrets_manager.py | {
"start": 4561,
"end": 9419
} | class ____:
"""Test asynchronous AwsSecret methods"""
async def test_read_secret(self, secret_under_test, aws_credentials):
expected_value = secret_under_test.pop("expected_value")
secret_name = secret_under_test.pop(
"secret_name"
) # Remove secret_name from kwargs
@flow
async def test_flow():
secret = AwsSecret(
aws_credentials=aws_credentials,
secret_name=secret_name, # Use for AwsSecret initialization
)
# Pass remaining kwargs (version_id, version_stage) if present
return await secret.aread_secret(**secret_under_test)
assert (await test_flow()) == expected_value
async def test_write_secret(self, aws_credentials, secretsmanager_client):
secret = AwsSecret(aws_credentials=aws_credentials, secret_name="my-test")
secret_value = b"test-secret-value"
@flow
async def test_flow():
return await secret.awrite_secret(secret_value)
arn = await test_flow()
assert arn.startswith("arn:aws:secretsmanager")
# Verify the secret was written correctly
response = secretsmanager_client.get_secret_value(SecretId="my-test")
assert response["SecretBinary"] == secret_value
async def test_delete_secret(self, aws_credentials, secretsmanager_client):
# First create a secret to delete
secret = AwsSecret(aws_credentials=aws_credentials, secret_name="test-delete")
secret_value = b"delete-me"
@flow
async def setup_flow():
return await secret.awrite_secret(secret_value)
arn = await setup_flow()
# Now test deletion
@flow
async def test_flow():
return await secret.adelete_secret(
recovery_window_in_days=7, force_delete_without_recovery=False
)
deleted_arn = await test_flow()
assert deleted_arn == arn
# Verify the secret is scheduled for deletion
with pytest.raises(secretsmanager_client.exceptions.InvalidRequestException):
secretsmanager_client.get_secret_value(SecretId="test-delete")
async def test_delete_secret_validation(self, aws_credentials):
secret = AwsSecret(
aws_credentials=aws_credentials, secret_name="test-validation"
)
with pytest.raises(ValueError, match="Cannot specify recovery window"):
await secret.adelete_secret(
force_delete_without_recovery=True, recovery_window_in_days=10
)
with pytest.raises(
ValueError, match="Recovery window must be between 7 and 30 days"
):
await secret.adelete_secret(recovery_window_in_days=42)
# Keep existing task-based tests
@pytest.mark.parametrize(
["secret_name", "secret_value"], [["string_secret", "42"], ["binary_secret", b"42"]]
)
async def test_create_secret(
aws_credentials, secret_name, secret_value, secretsmanager_client
):
@flow
async def test_flow():
return await create_secret(
secret_name=secret_name,
secret_value=secret_value,
aws_credentials=aws_credentials,
)
flow_state = await test_flow()
assert flow_state.get("Name") == secret_name
updated_secret = secretsmanager_client.get_secret_value(SecretId=secret_name)
assert (
updated_secret.get("SecretString") == secret_value
or updated_secret.get("SecretBinary") == secret_value
)
@pytest.mark.parametrize(
["recovery_window_in_days", "force_delete_without_recovery"],
[
[30, False],
[20, False],
[7, False],
[8, False],
[10, False],
[15, True],
[29, True],
],
)
async def test_delete_secret_task(
aws_credentials,
secret_under_test,
recovery_window_in_days,
force_delete_without_recovery,
):
@flow
async def test_flow():
return await delete_secret(
secret_name=secret_under_test["secret_name"],
aws_credentials=aws_credentials,
recovery_window_in_days=recovery_window_in_days,
force_delete_without_recovery=force_delete_without_recovery,
)
result = await test_flow()
if not force_delete_without_recovery and not 7 <= recovery_window_in_days <= 30:
with pytest.raises(ValueError):
result.get()
else:
assert result.get("Name") == secret_under_test["secret_name"]
deletion_date = result.get("DeletionDate")
if not force_delete_without_recovery:
assert deletion_date.date() == (
now("UTC").date() + timedelta(days=recovery_window_in_days)
)
else:
assert deletion_date.date() == now("UTC").date()
| TestAwsSecretAsync |
python | doocs__leetcode | solution/0600-0699/0680.Valid Palindrome II/Solution.py | {
"start": 0,
"end": 429
} | class ____:
def validPalindrome(self, s: str) -> bool:
def check(i, j):
while i < j:
if s[i] != s[j]:
return False
i, j = i + 1, j - 1
return True
i, j = 0, len(s) - 1
while i < j:
if s[i] != s[j]:
return check(i, j - 1) or check(i + 1, j)
i, j = i + 1, j - 1
return True
| Solution |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 30564,
"end": 31280
} | class ____(unittest.TestCase):
num_sample_runs = 50
def setUp(self):
self.setup_constants()
self.setup_faker()
def setup_faker(self):
self.fake = Faker("fil_PH")
Faker.seed(0)
def setup_constants(self):
from faker.providers.date_time.fil_PH import Provider
self.day_names = Provider.DAY_NAMES.values()
self.month_names = Provider.MONTH_NAMES.values()
def test_PH_of_week(self):
for _ in range(self.num_sample_runs):
assert self.fake.day_of_week() in self.day_names
def test_PH_month_name(self):
for _ in range(self.num_sample_runs):
assert self.fake.month_name() in self.month_names
| TestFilPh |
python | django__django | django/db/migrations/migration.py | {
"start": 9310,
"end": 9765
} | class ____(tuple):
"""
Subclass of tuple so Django can tell this was originally a swappable
dependency when it reads the migration file.
"""
def __new__(cls, value, setting):
self = tuple.__new__(cls, value)
self.setting = setting
return self
def swappable_dependency(value):
"""Turn a setting value into a dependency."""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
| SwappableTuple |
python | dask__distributed | distributed/deploy/tests/test_adaptive.py | {
"start": 16411,
"end": 22639
} | class ____(Adaptive):
def __init__(self, *args, interval=None, **kwargs):
super().__init__(*args, interval=interval, **kwargs)
self._target = 0
self._log = []
self._observed = set()
self._plan = set()
self._requested = set()
@property
def observed(self):
return self._observed
@property
def plan(self):
return self._plan
@property
def requested(self):
return self._requested
async def target(self):
return self._target
async def scale_up(self, n=0):
self._plan = self._requested = set(range(n))
async def scale_down(self, workers=()):
for collection in [self.plan, self.requested, self.observed]:
for w in workers:
collection.discard(w)
@gen_test()
async def test_adaptive_stops_on_cluster_status_change():
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
adapt = Adaptive(cluster, interval="100 ms")
assert adapt.state == "starting"
await async_poll_for(lambda: adapt.state == "running", timeout=5)
assert adapt.periodic_callback
assert adapt.periodic_callback.is_running()
try:
cluster.status = Status.closing
await async_poll_for(lambda: adapt.state != "running", timeout=5)
assert adapt.state == "stopped"
assert not adapt.periodic_callback
finally:
# Set back to running to let normal shutdown do its thing
cluster.status = Status.running
@gen_test()
async def test_interval():
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
adapt = MyAdaptive(cluster=cluster, interval="100 ms")
assert not adapt.plan
for i in [0, 3, 1]:
start = time()
adapt._target = i
while len(adapt.plan) != i:
await asyncio.sleep(0.01)
assert time() < start + 2
adapt.stop()
await asyncio.sleep(0.05)
adapt._target = 10
await asyncio.sleep(0.02)
assert len(adapt.plan) == 1 # last value from before, unchanged
@gen_test()
async def test_adapt_logs_error_in_safe_target():
class BadAdaptive(MyAdaptive):
"""Adaptive subclass which raises an OSError when attempting to adapt
We use this to check that error handling works properly
"""
def safe_target(self):
raise OSError()
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
with captured_logger(
"distributed.deploy.adaptive", level=logging.WARNING
) as log:
adapt = cluster.adapt(
Adaptive=BadAdaptive, minimum=1, maximum=4, interval="10ms"
)
while "encountered an error" not in log.getvalue():
await asyncio.sleep(0.01)
assert "stop" not in log.getvalue()
assert adapt.state == "running"
assert adapt.periodic_callback
assert adapt.periodic_callback.is_running()
@gen_test()
async def test_adapt_callback_logs_error_in_scale_down():
class BadAdaptive(MyAdaptive):
async def scale_down(self, workers=None):
raise OSError()
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
adapt = cluster.adapt(
Adaptive=BadAdaptive, minimum=1, maximum=4, wait_count=0, interval="10ms"
)
adapt._target = 2
await async_poll_for(lambda: adapt.state == "running", timeout=5)
assert adapt.periodic_callback.is_running()
await adapt.adapt()
assert len(adapt.plan) == 2
assert len(adapt.requested) == 2
with captured_logger(
"distributed.deploy.adaptive", level=logging.WARNING
) as log:
adapt._target = 0
while "encountered an error" not in log.getvalue():
await asyncio.sleep(0.01)
assert "stop" not in log.getvalue()
assert not adapt._adapting
assert adapt.periodic_callback
assert adapt.periodic_callback.is_running()
@pytest.mark.parametrize("wait_until_running", [True, False])
@gen_test()
async def test_adaptive_logs_stopping_once(wait_until_running):
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
with captured_logger("distributed.deploy.adaptive") as log:
adapt = cluster.adapt(Adaptive=MyAdaptive, interval="100ms")
if wait_until_running:
await async_poll_for(lambda: adapt.state == "running", timeout=5)
assert adapt.periodic_callback
assert adapt.periodic_callback.is_running()
pc = adapt.periodic_callback
else:
assert adapt.periodic_callback
assert not adapt.periodic_callback.is_running()
pc = adapt.periodic_callback
adapt.stop()
adapt.stop()
assert adapt.state == "stopped"
assert not adapt.periodic_callback
assert not pc.is_running()
lines = log.getvalue().splitlines()
assert sum("Adaptive scaling stopped" in line for line in lines) == 1
@gen_test()
async def test_adapt_stop_del():
async with LocalCluster(
n_workers=0,
asynchronous=True,
silence_logs=False,
dashboard_address=":0",
) as cluster:
adapt = cluster.adapt(Adaptive=MyAdaptive, interval="100ms")
pc = adapt.periodic_callback
await async_poll_for(lambda: adapt.state == "running", timeout=5) # noqa: F821
# Remove reference of adaptive object from cluster
cluster._adaptive = None
del adapt
await async_poll_for(lambda: not pc.is_running(), timeout=5)
| MyAdaptive |
python | realpython__materials | typer-cli-python/source_code_final/rptodo/database.py | {
"start": 794,
"end": 877
} | class ____(NamedTuple):
todo_list: List[Dict[str, Any]]
error: int
| DBResponse |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py | {
"start": 4018,
"end": 5662
} | class ____(TestCase):
def test_encoded_size_is_within_expected_limit(self) -> None:
state = _RendezvousState()
state.round = 1
state.complete = True
state.deadline = datetime.now(timezone.utc)
state.closed = True
# fmt: off
expected_max_sizes = (
( 5, 2 * (2 ** 10),), # 10 machines <= 2KB # noqa: E201, E241, E262
( 50, 16 * (2 ** 10),), # 100 machines <= 16KB # noqa: E201, E241, E262
( 500, 160 * (2 ** 10),), # 1000 machines <= 160KB # noqa: E201, E241, E262
(5000, 1600 * (2 ** 10),), # 10000 machines <= 1.6MB # noqa: E201, E241, E262
)
# fmt: on
for num_nodes, max_byte_size in expected_max_sizes:
with self.subTest(num_nodes=num_nodes, max_byte_size=max_byte_size):
for i in range(num_nodes):
node_running = _NodeDesc(
f"dummy{i}.dummy1-dummy1-dummy1-dummy1.com", 12345, i
)
node_waiting = _NodeDesc(
f"dummy{i}.dummy2-dummy2-dummy2-dummy2.com", 67890, i
)
state.participants[node_running] = i
state.wait_list.add(node_waiting)
state.last_heartbeats[node_running] = datetime.now(timezone.utc)
state.last_heartbeats[node_waiting] = datetime.now(timezone.utc)
bits = pickle.dumps(state)
base64_bits = b64encode(bits)
self.assertLessEqual(len(base64_bits), max_byte_size)
| RendezvousStateTest |
python | huggingface__transformers | tests/models/pvt_v2/test_modeling_pvt_v2.py | {
"start": 12365,
"end": 14971
} | class ____(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (PvtV2Backbone,) if is_torch_available() else ()
has_attentions = False
config_class = PvtV2Config
def test_config(self):
config_class = self.config_class
# test default config
config = config_class()
self.assertIsNotNone(config)
num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers
expected_stage_names = [f"stage{idx}" for idx in range(1, num_stages + 1)]
self.assertEqual(config.stage_names, expected_stage_names)
self.assertTrue(set(config.out_features).issubset(set(config.stage_names)))
# Test out_features and out_indices are correctly set
# out_features and out_indices both None
config = config_class(out_features=None, out_indices=None)
self.assertEqual(config.out_features, [config.stage_names[-1]])
self.assertEqual(config.out_indices, [len(config.stage_names) - 1])
# out_features and out_indices both set
config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 1])
self.assertEqual(config.out_features, ["stage1", "stage2"])
self.assertEqual(config.out_indices, [0, 1])
# Only out_features set
config = config_class(out_features=["stage2", "stage4"])
self.assertEqual(config.out_features, ["stage2", "stage4"])
self.assertEqual(config.out_indices, [1, 3])
# Only out_indices set
config = config_class(out_indices=[0, 2])
self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]])
self.assertEqual(config.out_indices, [0, 2])
# Error raised when out_indices do not correspond to out_features
with self.assertRaises(ValueError):
config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2])
def test_config_save_pretrained(self):
config_class = self.config_class
config_first = config_class(out_indices=[0, 1, 2, 3])
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(tmpdirname)
config_second = self.config_class.from_pretrained(tmpdirname)
# Fix issue where type switches in the saving process
if isinstance(config_second.image_size, list):
config_second.image_size = tuple(config_second.image_size)
self.assertEqual(config_second.to_dict(), config_first.to_dict())
def setUp(self):
self.model_tester = PvtV2ModelTester(self)
| PvtV2BackboneTest |
python | neetcode-gh__leetcode | python/0040-combination-sum-ii.py | {
"start": 0,
"end": 693
} | class ____:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
candidates.sort()
res = []
def backtrack(cur, pos, target):
if target == 0:
res.append(cur.copy())
return
if target <= 0:
return
prev = -1
for i in range(pos, len(candidates)):
if candidates[i] == prev:
continue
cur.append(candidates[i])
backtrack(cur, i + 1, target - candidates[i])
cur.pop()
prev = candidates[i]
backtrack([], 0, target)
return res
| Solution |
python | wandb__wandb | wandb/cli/beta_sync.py | {
"start": 2856,
"end": 6960
} | class ____:
"""Displays a sync operation's status until it completes."""
def __init__(
self,
id: str,
service: ServiceConnection,
printer: Printer,
) -> None:
self._id = id
self._service = service
self._printer = printer
self._rate_limit_last_time: float | None = None
self._done = asyncio.Event()
async def wait_with_progress(
self,
handle: MailboxHandle[ServerSyncResponse],
) -> None:
"""Display status updates until the handle completes."""
async with asyncio_compat.open_task_group() as group:
group.start_soon(self._wait_then_mark_done(handle))
group.start_soon(self._show_progress_until_done())
async def _wait_then_mark_done(
self,
handle: MailboxHandle[ServerSyncResponse],
) -> None:
response = await handle.wait_async(timeout=None)
for msg in response.messages:
self._printer.display(msg.content, level=msg.severity)
self._done.set()
async def _show_progress_until_done(self) -> None:
"""Show rate-limited status updates until _done is set."""
with progress_printer(self._printer, "Syncing...") as progress:
while not await self._rate_limit_check_done():
handle = await self._service.sync_status(self._id)
response = await handle.wait_async(timeout=None)
for msg in response.new_messages:
self._printer.display(msg.content, level=msg.severity)
progress.update(dict(response.stats))
async def _rate_limit_check_done(self) -> bool:
"""Wait for rate limit and return whether _done is set."""
now = time.monotonic()
last_time = self._rate_limit_last_time
self._rate_limit_last_time = now
if last_time and (time_since_last := now - last_time) < _POLL_WAIT_SECONDS:
await asyncio_compat.race(
_SLEEP(_POLL_WAIT_SECONDS - time_since_last),
self._done.wait(),
)
return self._done.is_set()
def _find_wandb_files(
path: pathlib.Path,
*,
skip_synced: bool,
) -> Iterator[pathlib.Path]:
"""Returns paths to the .wandb files to sync."""
if skip_synced:
yield from filterfalse(_is_synced, _expand_wandb_files(path))
else:
yield from _expand_wandb_files(path)
def _expand_wandb_files(
path: pathlib.Path,
) -> Iterator[pathlib.Path]:
"""Iterate over .wandb files selected by the path."""
if path.suffix == ".wandb":
yield path
return
files_in_run_directory = path.glob("*.wandb")
try:
first_file = next(files_in_run_directory)
except StopIteration:
pass
else:
yield first_file
yield from files_in_run_directory
return
yield from path.glob("*/*.wandb")
def _is_synced(path: pathlib.Path) -> bool:
"""Returns whether the .wandb file is synced."""
return path.with_suffix(".wandb.synced").exists()
def _print_sorted_paths(paths: Iterable[pathlib.Path], verbose: bool) -> None:
"""Print file paths, sorting them and truncating the list if needed.
Args:
paths: Paths to print. Must be absolute with symlinks resolved.
verbose: If true, doesn't truncate paths.
"""
# Prefer to print paths relative to the current working directory.
cwd = pathlib.Path(".").resolve()
formatted_paths: list[str] = []
for path in paths:
try:
formatted_path = str(path.relative_to(cwd))
except ValueError:
formatted_path = str(path)
formatted_paths.append(formatted_path)
sorted_paths = sorted(formatted_paths)
max_lines = len(sorted_paths) if verbose else _MAX_LIST_LINES
for i in range(min(len(sorted_paths), max_lines)):
click.echo(f" {sorted_paths[i]}")
if len(sorted_paths) > max_lines:
remaining = len(sorted_paths) - max_lines
click.echo(f" +{remaining:,d} more (pass --verbose to see all)")
| _SyncStatusLoop |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 28316,
"end": 46383
} | class ____(Protocol[L]):
def __call__(self, uris: URIs) -> L:
...
def validate_ids(ids: IDs) -> IDs:
"""Validates ids to ensure it is a list of strings"""
if not isinstance(ids, list):
raise ValueError(f"Expected IDs to be a list, got {type(ids).__name__} as IDs")
if len(ids) == 0:
raise ValueError(f"Expected IDs to be a non-empty list, got {len(ids)} IDs")
seen = set()
dups = set()
for id_ in ids:
if not isinstance(id_, str):
raise ValueError(f"Expected ID to be a str, got {id_}")
if id_ in seen:
dups.add(id_)
else:
seen.add(id_)
if dups:
n_dups = len(dups)
if n_dups < 10:
example_string = ", ".join(dups)
message = (
f"Expected IDs to be unique, found duplicates of: {example_string}"
)
else:
examples = []
for idx, dup in enumerate(dups):
examples.append(dup)
if idx == 10:
break
example_string = (
f"{', '.join(examples[:5])}, ..., {', '.join(examples[-5:])}"
)
message = f"Expected IDs to be unique, found {n_dups} duplicated IDs: {example_string}"
raise errors.DuplicateIDError(message)
return ids
def validate_metadata(metadata: Metadata) -> Metadata:
"""Validates metadata to ensure it is a dictionary of strings to strings, ints, floats, bools, or SparseVectors"""
if not isinstance(metadata, dict) and metadata is not None:
raise ValueError(
f"Expected metadata to be a dict or None, got {type(metadata).__name__} as metadata"
)
if metadata is None:
return metadata
if len(metadata) == 0:
raise ValueError(
f"Expected metadata to be a non-empty dict, got {len(metadata)} metadata attributes"
)
for key, value in metadata.items():
if key == META_KEY_CHROMA_DOCUMENT:
raise ValueError(
f"Expected metadata to not contain the reserved key {META_KEY_CHROMA_DOCUMENT}"
)
if not isinstance(key, str):
raise TypeError(
f"Expected metadata key to be a str, got {key} which is a {type(key).__name__}"
)
# Check if value is a SparseVector (validation happens in __post_init__)
if isinstance(value, SparseVector):
pass # Already validated in SparseVector.__post_init__
# isinstance(True, int) evaluates to True, so we need to check for bools separately
elif not isinstance(value, bool) and not isinstance(
value, (str, int, float, type(None))
):
raise ValueError(
f"Expected metadata value to be a str, int, float, bool, SparseVector, or None, got {value} which is a {type(value).__name__}"
)
return metadata
def validate_update_metadata(metadata: UpdateMetadata) -> UpdateMetadata:
"""Validates metadata to ensure it is a dictionary of strings to strings, ints, floats, bools, or SparseVectors"""
if not isinstance(metadata, dict) and metadata is not None:
raise ValueError(
f"Expected metadata to be a dict or None, got {type(metadata)}"
)
if metadata is None:
return metadata
if len(metadata) == 0:
raise ValueError(f"Expected metadata to be a non-empty dict, got {metadata}")
for key, value in metadata.items():
if not isinstance(key, str):
raise ValueError(f"Expected metadata key to be a str, got {key}")
# Check if value is a SparseVector (validation happens in __post_init__)
if isinstance(value, SparseVector):
pass # Already validated in SparseVector.__post_init__
# isinstance(True, int) evaluates to True, so we need to check for bools separately
elif not isinstance(value, bool) and not isinstance(
value, (str, int, float, type(None))
):
raise ValueError(
f"Expected metadata value to be a str, int, float, bool, SparseVector, or None, got {value}"
)
return metadata
def serialize_metadata(metadata: Optional[Metadata]) -> Optional[Dict[str, Any]]:
"""Serialize metadata for transport, converting SparseVector dataclass instances to dicts.
Args:
metadata: Metadata dictionary that may contain SparseVector instances
Returns:
Metadata dictionary with SparseVector instances converted to transport format
"""
if metadata is None:
return None
result: Dict[str, Any] = {}
for key, value in metadata.items():
if isinstance(value, SparseVector):
result[key] = value.to_dict()
else:
result[key] = value
return result
def deserialize_metadata(
metadata: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""Deserialize metadata from transport, converting dicts with #type=sparse_vector to dataclass instances.
Args:
metadata: Metadata dictionary from transport that may contain serialized SparseVectors
Returns:
Metadata dictionary with serialized SparseVectors converted to dataclass instances
"""
if metadata is None:
return None
result: Dict[str, Any] = {}
for key, value in metadata.items():
if isinstance(value, dict) and value.get(TYPE_KEY) == SPARSE_VECTOR_TYPE_VALUE:
result[key] = SparseVector.from_dict(value)
else:
result[key] = value
return result
def validate_metadatas(metadatas: Metadatas) -> Metadatas:
"""Validates metadatas to ensure it is a list of dictionaries of strings to strings, ints, floats or bools"""
if not isinstance(metadatas, list):
raise ValueError(f"Expected metadatas to be a list, got {metadatas}")
for metadata in metadatas:
validate_metadata(metadata)
return metadatas
def validate_where(where: Where) -> None:
"""
Validates where to ensure it is a dictionary of strings to strings, ints, floats or operator expressions,
or in the case of $and and $or, a list of where expressions
"""
if not isinstance(where, dict):
raise ValueError(f"Expected where to be a dict, got {where}")
if len(where) != 1:
raise ValueError(f"Expected where to have exactly one operator, got {where}")
for key, value in where.items():
if not isinstance(key, str):
raise ValueError(f"Expected where key to be a str, got {key}")
if (
key != "$and"
and key != "$or"
and key != "$in"
and key != "$nin"
and not isinstance(value, (str, int, float, dict))
):
raise ValueError(
f"Expected where value to be a str, int, float, or operator expression, got {value}"
)
if key == "$and" or key == "$or":
if not isinstance(value, list):
raise ValueError(
f"Expected where value for $and or $or to be a list of where expressions, got {value}"
)
if len(value) <= 1:
raise ValueError(
f"Expected where value for $and or $or to be a list with at least two where expressions, got {value}"
)
for where_expression in value:
validate_where(where_expression)
# Value is a operator expression
if isinstance(value, dict):
# Ensure there is only one operator
if len(value) != 1:
raise ValueError(
f"Expected operator expression to have exactly one operator, got {value}"
)
for operator, operand in value.items():
# Only numbers can be compared with gt, gte, lt, lte
if operator in ["$gt", "$gte", "$lt", "$lte"]:
if not isinstance(operand, (int, float)):
raise ValueError(
f"Expected operand value to be an int or a float for operator {operator}, got {operand}"
)
if operator in ["$in", "$nin"]:
if not isinstance(operand, list):
raise ValueError(
f"Expected operand value to be an list for operator {operator}, got {operand}"
)
if operator not in [
"$gt",
"$gte",
"$lt",
"$lte",
"$ne",
"$eq",
"$in",
"$nin",
]:
raise ValueError(
f"Expected where operator to be one of $gt, $gte, $lt, $lte, $ne, $eq, $in, $nin, "
f"got {operator}"
)
if not isinstance(operand, (str, int, float, list)):
raise ValueError(
f"Expected where operand value to be a str, int, float, or list of those type, got {operand}"
)
if isinstance(operand, list) and (
len(operand) == 0
or not all(isinstance(x, type(operand[0])) for x in operand)
):
raise ValueError(
f"Expected where operand value to be a non-empty list, and all values to be of the same type "
f"got {operand}"
)
def validate_where_document(where_document: WhereDocument) -> None:
"""
Validates where_document to ensure it is a dictionary of WhereDocumentOperator to strings, or in the case of $and and $or,
a list of where_document expressions
"""
if not isinstance(where_document, dict):
raise ValueError(
f"Expected where document to be a dictionary, got {where_document}"
)
if len(where_document) != 1:
raise ValueError(
f"Expected where document to have exactly one operator, got {where_document}"
)
for operator, operand in where_document.items():
if operator not in [
"$contains",
"$not_contains",
"$regex",
"$not_regex",
"$and",
"$or",
]:
raise ValueError(
f"Expected where document operator to be one of $contains, $not_contains, $regex, $not_regex, $and, $or, got {operator}"
)
if operator == "$and" or operator == "$or":
if not isinstance(operand, list):
raise ValueError(
f"Expected document value for $and or $or to be a list of where document expressions, got {operand}"
)
if len(operand) <= 1:
raise ValueError(
f"Expected document value for $and or $or to be a list with at least two where document expressions, got {operand}"
)
for where_document_expression in operand:
validate_where_document(where_document_expression)
# Value is $contains/$not_contains/$regex/$not_regex operator
elif not isinstance(operand, str):
raise ValueError(
f"Expected where document operand value for operator {operator} to be a str, got {operand}"
)
elif len(operand) == 0:
raise ValueError(
f"Expected where document operand value for operator {operator} to be a non-empty str"
)
def validate_include(include: Include, dissalowed: Optional[Include] = None) -> None:
"""Validates include to ensure it is a list of strings. Since get does not allow distances, allow_distances is used
to control if distances is allowed"""
if not isinstance(include, list):
raise ValueError(f"Expected include to be a list, got {include}")
for item in include:
if not isinstance(item, str):
raise ValueError(f"Expected include item to be a str, got {item}")
# Get the valid items from the Literal type inside the List
valid_items = get_args(get_args(Include)[0])
if item not in valid_items:
raise ValueError(
f"Expected include item to be one of {', '.join(valid_items)}, got {item}"
)
if dissalowed is not None and any(item == e for e in dissalowed):
raise ValueError(
f"Include item cannot be one of {', '.join(dissalowed)}, got {item}"
)
def validate_n_results(n_results: int) -> int:
"""Validates n_results to ensure it is a positive Integer. Since hnswlib does not allow n_results to be negative."""
# Check Number of requested results
if not isinstance(n_results, int):
raise ValueError(
f"Expected requested number of results to be a int, got {n_results}"
)
if n_results <= 0:
raise TypeError(
f"Number of requested results {n_results}, cannot be negative, or zero."
)
return n_results
def validate_embeddings(embeddings: Embeddings) -> Embeddings:
"""Validates embeddings to ensure it is a list of numpy arrays of ints, or floats"""
if not isinstance(embeddings, (list, np.ndarray)):
raise ValueError(
f"Expected embeddings to be a list, got {type(embeddings).__name__}"
)
if len(embeddings) == 0:
raise ValueError(
f"Expected embeddings to be a list with at least one item, got {len(embeddings)} embeddings"
)
if not all([isinstance(e, np.ndarray) for e in embeddings]):
raise ValueError(
"Expected each embedding in the embeddings to be a numpy array, got "
f"{list(set([type(e).__name__ for e in embeddings]))}"
)
for i, embedding in enumerate(embeddings):
if embedding.ndim == 0:
raise ValueError(
f"Expected a 1-dimensional array, got a 0-dimensional array {embedding}"
)
if embedding.size == 0:
raise ValueError(
f"Expected each embedding in the embeddings to be a 1-dimensional numpy array with at least 1 int/float value. Got a 1-dimensional numpy array with no values at pos {i}"
)
if embedding.dtype not in [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]:
raise ValueError(
"Expected each value in the embedding to be a int or float, got an embedding with "
f"{embedding.dtype} - {embedding}"
)
return embeddings
def validate_sparse_vectors(vectors: SparseVectors) -> SparseVectors:
"""Validates sparse vectors to ensure it is a non-empty list of SparseVector instances.
This function validates the structure and types of sparse vectors returned by
SparseEmbeddingFunction implementations. It ensures:
- Vectors is a list
- List is non-empty
- All items are SparseVector instances
Note: Individual SparseVector validation (sorted indices, non-negative values, etc.)
happens automatically in SparseVector.__post_init__ when each instance is created.
This function only validates the list structure and instance types.
"""
if not isinstance(vectors, list):
raise ValueError(
f"Expected sparse vectors to be a list, got {type(vectors).__name__}"
)
if len(vectors) == 0:
raise ValueError(
f"Expected sparse vectors to be a non-empty list, got {len(vectors)} sparse vectors"
)
for i, vector in enumerate(vectors):
if not isinstance(vector, SparseVector):
raise ValueError(
f"Expected SparseVector instance at position {i}, got {type(vector).__name__}"
)
return vectors
def validate_documents(documents: Documents, nullable: bool = False) -> None:
"""Validates documents to ensure it is a list of strings"""
if not isinstance(documents, list):
raise ValueError(
f"Expected documents to be a list, got {type(documents).__name__}"
)
if len(documents) == 0:
raise ValueError(
f"Expected documents to be a non-empty list, got {len(documents)} documents"
)
for document in documents:
# If embeddings are present, some documents can be None
if document is None and nullable:
continue
if not is_document(document):
raise ValueError(f"Expected document to be a str, got {document}")
def validate_images(images: Images) -> None:
"""Validates images to ensure it is a list of numpy arrays"""
if not isinstance(images, list):
raise ValueError(f"Expected images to be a list, got {type(images).__name__}")
if len(images) == 0:
raise ValueError(
f"Expected images to be a non-empty list, got {len(images)} images"
)
for image in images:
if not is_image(image):
raise ValueError(f"Expected image to be a numpy array, got {image}")
def validate_batch(
batch: Tuple[
IDs,
Optional[Union[Embeddings, PyEmbeddings]],
Optional[Metadatas],
Optional[Documents],
Optional[URIs],
],
limits: Dict[str, Any],
) -> None:
if len(batch[0]) > limits["max_batch_size"]:
raise ValueError(
f"Batch size {len(batch[0])} exceeds maximum batch size {limits['max_batch_size']}"
)
def convert_np_embeddings_to_list(embeddings: Embeddings) -> PyEmbeddings:
# Cast the result to PyEmbeddings to ensure type compatibility
return cast(PyEmbeddings, [embedding.tolist() for embedding in embeddings])
def convert_list_embeddings_to_np(embeddings: PyEmbeddings) -> Embeddings:
return [np.array(embedding) for embedding in embeddings]
@runtime_checkable
| DataLoader |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 40600,
"end": 55228
} | class ____:
#: Group key -> summary.
#: Right now, we only have func_class_name as a key.
# TODO(sang): Support the task group abstraction.
summary: Union[Dict[str, TaskSummaryPerFuncOrClassName], List[NestedTaskSummary]]
#: Total Ray tasks.
total_tasks: int
#: Total actor tasks.
total_actor_tasks: int
#: Total scheduled actors.
total_actor_scheduled: int
summary_by: str = "func_name"
@classmethod
def to_summary_by_func_name(cls, *, tasks: List[Dict]) -> "TaskSummaries":
# NOTE: The argument tasks contains a list of dictionary
# that have the same k/v as TaskState.
summary = {}
total_tasks = 0
total_actor_tasks = 0
total_actor_scheduled = 0
for task in tasks:
key = task["func_or_class_name"]
if key not in summary:
summary[key] = TaskSummaryPerFuncOrClassName(
func_or_class_name=task["func_or_class_name"],
type=task["type"],
)
task_summary = summary[key]
state = task["state"]
if state not in task_summary.state_counts:
task_summary.state_counts[state] = 0
task_summary.state_counts[state] += 1
type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number
if type_enum == TaskType.NORMAL_TASK:
total_tasks += 1
elif type_enum == TaskType.ACTOR_CREATION_TASK:
total_actor_scheduled += 1
elif type_enum == TaskType.ACTOR_TASK:
total_actor_tasks += 1
return TaskSummaries(
summary=summary,
total_tasks=total_tasks,
total_actor_tasks=total_actor_tasks,
total_actor_scheduled=total_actor_scheduled,
summary_by="func_name",
)
@classmethod
def to_summary_by_lineage(
cls, *, tasks: List[Dict], actors: List[Dict]
) -> "TaskSummaries":
"""
This summarizes tasks by lineage.
i.e. A task will be grouped with another task if they have the
same parent.
This does things in 4 steps.
Step 1: Iterate through all tasks and keep track of them by id and ownership
Step 2: Put the tasks in a tree structure based on ownership
Step 3: Merge together siblings in the tree if there are more
than one with the same name.
Step 4: Sort by running and then errored and then successful tasks
Step 5: Total the children
This can probably be more efficient if we merge together some steps to
reduce the amount of iterations but this algorithm produces very easy to
understand code. We can optimize in the future.
"""
# NOTE: The argument tasks contains a list of dictionary
# that have the same k/v as TaskState.
tasks_by_id = {}
task_group_by_id = {}
actor_creation_task_id_for_actor_id = {}
summary = []
total_tasks = 0
total_actor_tasks = 0
total_actor_scheduled = 0
# Step 1
# We cannot assume that a parent task always comes before the child task
# So we need to keep track of all tasks by ids so we can quickly find the
# parent.
# We also track the actor creation tasks so we can quickly figure out the
# ownership of actors.
for task in tasks:
tasks_by_id[task["task_id"]] = task
type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number
if type_enum == TaskType.ACTOR_CREATION_TASK:
actor_creation_task_id_for_actor_id[task["actor_id"]] = task["task_id"]
actor_dict = {actor["actor_id"]: actor for actor in actors}
def get_or_create_task_group(task_id: str) -> Optional[NestedTaskSummary]:
"""
Gets an already created task_group
OR
Creates a task group and puts it in the right place under its parent.
For actor tasks, the parent is the Actor that owns it. For all other
tasks, the owner is the driver or task that created it.
Returns None if there is missing data about the task or one of its parents.
For task groups that represents actors, the id is in the
format actor:{actor_id}
"""
if task_id in task_group_by_id:
return task_group_by_id[task_id]
task = tasks_by_id.get(task_id)
if not task:
logger.debug(f"We're missing data about {task_id}")
# We're missing data about this parent. So we're dropping the whole
# tree at that node.
return None
# Use name first which allows users to customize the name of
# their remote function call using the name option.
func_name = task["name"] or task["func_or_class_name"]
task_id = task["task_id"]
type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number
task_group_by_id[task_id] = NestedTaskSummary(
name=func_name,
key=task_id,
type=task["type"],
timestamp=task["creation_time_ms"],
link=Link(type="task", id=task_id),
)
# Set summary in right place under parent
if (
type_enum == TaskType.ACTOR_TASK
or type_enum == TaskType.ACTOR_CREATION_TASK
):
# For actor tasks, the parent is the actor and not the parent task.
parent_task_group = get_or_create_actor_task_group(task["actor_id"])
if parent_task_group:
parent_task_group.children.append(task_group_by_id[task_id])
else:
parent_task_id = task["parent_task_id"]
if not parent_task_id or parent_task_id.startswith(
DRIVER_TASK_ID_PREFIX
):
summary.append(task_group_by_id[task_id])
else:
parent_task_group = get_or_create_task_group(parent_task_id)
if parent_task_group:
parent_task_group.children.append(task_group_by_id[task_id])
return task_group_by_id[task_id]
def get_or_create_actor_task_group(
actor_id: str,
) -> Optional[NestedTaskSummary]:
"""
Gets an existing task group that represents an actor.
OR
Creates a task group that represents an actor. The owner of the actor is
the parent of the creation_task that created that actor.
Returns None if there is missing data about the actor or one of its parents.
"""
key = f"actor:{actor_id}"
actor = actor_dict.get(actor_id)
if key not in task_group_by_id:
creation_task_id = actor_creation_task_id_for_actor_id.get(actor_id)
creation_task = tasks_by_id.get(creation_task_id)
if not creation_task:
logger.debug(f"We're missing data about actor {actor_id}")
# We're missing data about the parent. So we're dropping the whole
# tree at that node.
return None
# TODO(rickyx)
# We are using repr name for grouping actors if exists,
# else use class name. We should be using some group_name in the future.
if actor is None:
logger.debug(
f"We are missing actor info for actor {actor_id}, "
f"even though creation task exists: {creation_task}"
)
[actor_name, *rest] = creation_task["func_or_class_name"].split(".")
else:
actor_name = (
actor["repr_name"]
if actor["repr_name"]
else actor["class_name"]
)
task_group_by_id[key] = NestedTaskSummary(
name=actor_name,
key=key,
type="ACTOR",
timestamp=task["creation_time_ms"],
link=Link(type="actor", id=actor_id),
)
parent_task_id = creation_task["parent_task_id"]
if not parent_task_id or parent_task_id.startswith(
DRIVER_TASK_ID_PREFIX
):
summary.append(task_group_by_id[key])
else:
parent_task_group = get_or_create_task_group(parent_task_id)
if parent_task_group:
parent_task_group.children.append(task_group_by_id[key])
return task_group_by_id[key]
# Step 2: Create the tree structure based on ownership
for task in tasks:
task_id = task["task_id"]
task_group = get_or_create_task_group(task_id)
if not task_group:
# We are probably missing data about this task or one of its parents.
continue
state = task["state"]
if state not in task_group.state_counts:
task_group.state_counts[state] = 0
task_group.state_counts[state] += 1
type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number
if type_enum == TaskType.NORMAL_TASK:
total_tasks += 1
elif type_enum == TaskType.ACTOR_CREATION_TASK:
total_actor_scheduled += 1
elif type_enum == TaskType.ACTOR_TASK:
total_actor_tasks += 1
def merge_sibings_for_task_group(
siblings: List[NestedTaskSummary],
) -> Tuple[List[NestedTaskSummary], Optional[int]]:
"""
Merges task summaries with the same name into a group if there are more than
one child with that name.
Args:
siblings: A list of NestedTaskSummary's to merge together
Returns
Index 0: A list of NestedTaskSummary's which have been merged
Index 1: The smallest timestamp amongst the siblings
"""
if not len(siblings):
return siblings, None
# Group by name
groups = {}
min_timestamp = None
for child in siblings:
child.children, child_min_timestamp = merge_sibings_for_task_group(
child.children
)
if child_min_timestamp and child_min_timestamp < (
child.timestamp or sys.maxsize
):
child.timestamp = child_min_timestamp
if child.name not in groups:
groups[child.name] = NestedTaskSummary(
name=child.name,
key=child.name,
type="GROUP",
)
groups[child.name].children.append(child)
if child.timestamp and child.timestamp < (
groups[child.name].timestamp or sys.maxsize
):
groups[child.name].timestamp = child.timestamp
if child.timestamp < (min_timestamp or sys.maxsize):
min_timestamp = child.timestamp
# Take the groups that have more than one children and return it.
# For groups with just one child, return the child itself instead of
# creating a group.
return [
group if len(group.children) > 1 else group.children[0]
for group in groups.values()
], min_timestamp
# Step 3
summary, _ = merge_sibings_for_task_group(summary)
def get_running_tasks_count(task_group: NestedTaskSummary) -> int:
return (
task_group.state_counts.get("RUNNING", 0)
+ task_group.state_counts.get("RUNNING_IN_RAY_GET", 0)
+ task_group.state_counts.get("RUNNING_IN_RAY_WAIT", 0)
)
def get_pending_tasks_count(task_group: NestedTaskSummary) -> int:
return (
task_group.state_counts.get("PENDING_ARGS_AVAIL", 0)
+ task_group.state_counts.get("PENDING_NODE_ASSIGNMENT", 0)
+ task_group.state_counts.get("PENDING_OBJ_STORE_MEM_AVAIL", 0)
+ task_group.state_counts.get("PENDING_ARGS_FETCH", 0)
)
def sort_task_groups(task_groups: List[NestedTaskSummary]) -> None:
# Sort by running tasks, pending tasks, failed tasks, timestamp,
# and actor_creation_task
# Put actor creation tasks above other tasks with the same timestamp
task_groups.sort(key=lambda x: 0 if x.type == "ACTOR_CREATION_TASK" else 1)
task_groups.sort(key=lambda x: x.timestamp or sys.maxsize)
task_groups.sort(
key=lambda x: x.state_counts.get("FAIELD", 0), reverse=True
)
task_groups.sort(key=get_pending_tasks_count, reverse=True)
task_groups.sort(key=get_running_tasks_count, reverse=True)
def calc_total_for_task_group(
task_group: NestedTaskSummary,
) -> NestedTaskSummary:
"""
Calculates the total of a group as the sum of all children.
Sorts children by timestamp
"""
if not len(task_group.children):
return task_group
for child in task_group.children:
totaled = calc_total_for_task_group(child)
for state, count in totaled.state_counts.items():
task_group.state_counts[state] = (
task_group.state_counts.get(state, 0) + count
)
sort_task_groups(task_group.children)
return task_group
# Step 4
summary = [calc_total_for_task_group(task_group) for task_group in summary]
sort_task_groups(summary)
return TaskSummaries(
summary=summary,
total_tasks=total_tasks,
total_actor_tasks=total_actor_tasks,
total_actor_scheduled=total_actor_scheduled,
summary_by="lineage",
)
@dataclass(init=not IS_PYDANTIC_2)
| TaskSummaries |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 122822,
"end": 129487
} | class ____(fixtures.MappedTest):
"""Test that cascades are trimmed accordingly when viewonly is set.
Originally #4993 and #4994 this was raising an error for invalid
cascades. in 2.0 this is simplified to just remove the write
cascades, allows the default cascade to be reasonable.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30)),
)
Table(
"orders",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer),
Column("description", String(30)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Order(cls.Comparable):
pass
@testing.combinations(
({"delete"}, {"none"}),
(
{"all, delete-orphan"},
{"refresh-expire", "expunge", "merge"},
),
({"save-update, expunge"}, {"expunge"}),
)
def test_write_cascades(self, setting, expected):
Order = self.classes.Order
r = relationship(
Order,
primaryjoin=(
self.tables.users.c.id == foreign(self.tables.orders.c.user_id)
),
cascade=", ".join(sorted(setting)),
viewonly=True,
)
eq_(r.cascade, CascadeOptions(expected))
def test_expunge_cascade(self):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
self.mapper_registry.map_imperatively(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order,
primaryjoin=(
self.tables.users.c.id
== foreign(self.tables.orders.c.user_id)
),
cascade="expunge",
viewonly=True,
)
},
)
sess = fixture_session()
u = User(id=1, name="jack")
sess.add(u)
sess.add_all(
[
Order(id=1, user_id=1, description="someorder"),
Order(id=2, user_id=1, description="someotherorder"),
]
)
sess.commit()
u1 = sess.query(User).first()
orders = u1.orders
eq_(len(orders), 2)
in_(orders[0], sess)
in_(orders[1], sess)
sess.expunge(u1)
not_in(orders[0], sess)
not_in(orders[1], sess)
def test_default_none_cascade(self):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
self.mapper_registry.map_imperatively(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order,
primaryjoin=(
self.tables.users.c.id
== foreign(self.tables.orders.c.user_id)
),
viewonly=True,
)
},
)
sess = fixture_session()
u1 = User(id=1, name="jack")
sess.add(u1)
o1, o2 = (
Order(id=1, user_id=1, description="someorder"),
Order(id=2, user_id=1, description="someotherorder"),
)
u1.orders.append(o1)
u1.orders.append(o2)
not_in(o1, sess)
not_in(o2, sess)
@testing.combinations(
"persistent", "pending", argnames="collection_status"
)
def test_default_merge_cascade(self, collection_status):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
self.mapper_registry.map_imperatively(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order,
primaryjoin=(
self.tables.users.c.id
== foreign(self.tables.orders.c.user_id)
),
viewonly=True,
)
},
)
sess = fixture_session()
u1 = User(id=1, name="jack")
o1, o2 = (
Order(id=1, user_id=1, description="someorder"),
Order(id=2, user_id=1, description="someotherorder"),
)
if collection_status == "pending":
# technically this is pointless, one should not be appending
# to this collection
u1.orders.append(o1)
u1.orders.append(o2)
elif collection_status == "persistent":
sess.add(u1)
sess.flush()
sess.add_all([o1, o2])
sess.flush()
u1.orders
else:
assert False
u1 = sess.merge(u1)
# in 1.4, as of #4993 this was asserting that u1.orders would
# not be present in the new object. However, as observed during
# #8862, this defeats schemes that seek to restore fully loaded
# objects from caches which may even have lazy="raise", but
# in any case would want to not emit new SQL on those collections.
# so we assert here that u1.orders is in fact present
assert "orders" in u1.__dict__
assert u1.__dict__["orders"]
assert u1.orders
def test_default_cascade(self):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
self.mapper_registry.map_imperatively(Order, orders)
umapper = self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order,
primaryjoin=(
self.tables.users.c.id
== foreign(self.tables.orders.c.user_id)
),
viewonly=True,
)
},
)
eq_(umapper.attrs["orders"].cascade, {"merge"})
| ViewonlyCascadeUpdate |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess7.py | {
"start": 458,
"end": 640
} | class ____(metaclass=MetaClass):
pass
v1 = ClassB.some_function(3)
reveal_type(v1, expected_text="int")
v2 = ClassB.some_function("hi")
reveal_type(v2, expected_text="str")
| ClassB |
python | bokeh__bokeh | src/bokeh/core/property/any.py | {
"start": 1379,
"end": 2347
} | class ____(Property[typing.Any]):
""" Accept all values.
The ``Any`` property does not do any validation or transformation.
Args:
default (obj or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. (default: None)
Example:
.. code-block:: python
>>> class AnyModel(HasProps):
... prop = Any()
...
>>> m = AnyModel()
>>> m.prop = True
>>> m.prop = 10
>>> m.prop = 3.14
>>> m.prop = "foo"
>>> m.prop = [1, 2, 3]
"""
# TODO: default should be explicitly defined by the user (i.e. intrinsic here)
def __init__(self, default: Init[typing.Any] = None, help: str | None = None) -> None:
super().__init__(default=default, help=help)
| Any |
python | pytest-dev__pytest-cov | src/pytest_cov/__init__.py | {
"start": 194,
"end": 323
} | class ____(pytest.PytestWarning):
"""
The base for all pytest-cov warnings, never raised directly.
"""
| PytestCovWarning |
python | django__django | tests/sessions_tests/tests.py | {
"start": 33522,
"end": 33593
} | class ____(CacheDBSessionTests):
pass
| CacheDBSessionWithTimeZoneTests |
python | ray-project__ray | python/ray/util/actor_group.py | {
"start": 377,
"end": 516
} | class ____:
"""Class containing an actor and its metadata."""
actor: ActorHandle
metadata: ActorMetadata
@dataclass
| ActorWrapper |
python | getsentry__sentry | src/sentry/types/region.py | {
"start": 3929,
"end": 4042
} | class ____(Exception):
"""Indicate that the server is not in a state to resolve a region."""
| RegionContextError |
python | bokeh__bokeh | src/bokeh/models/annotations/geometry.py | {
"start": 17354,
"end": 19325
} | class ____(DataAnnotation):
''' Render a whisker along a dimension.
See :ref:`ug_basic_annotations_whiskers` for information on plotting whiskers.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
lower = UnitsSpec(default=field("lower"), units_enum=CoordinateUnits, units_default="data", help="""
The coordinates of the lower end of the whiskers.
""")
lower_head = Nullable(Instance(ArrowHead), default=InstanceDefault(TeeHead, size=10), help="""
Instance of ``ArrowHead``.
""")
upper = UnitsSpec(default=field("upper"), units_enum=CoordinateUnits, units_default="data", help="""
The coordinates of the upper end of the whiskers.
""")
upper_head = Nullable(Instance(ArrowHead), default=InstanceDefault(TeeHead, size=10), help="""
Instance of ``ArrowHead``.
""")
base = UnitsSpec(default=field("base"), units_enum=CoordinateUnits, units_default="data", help="""
The orthogonal coordinates of the upper and lower values.
""")
dimension = Enum(Dimension, default='height', help="""
The direction of the whisker can be specified by setting this property
to "height" (``y`` direction) or "width" (``x`` direction).
""")
line_props = Include(LineProps, help="""
The {prop} values for the whisker body.
""")
level = Override(default="underlay")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Whisker |
python | great-expectations__great_expectations | tests/expectations/metrics/query_metrics/test_query_metrics.py | {
"start": 6011,
"end": 7610
} | class ____(QueryRowCount):
metric_name = "my_query.row_count"
value_keys = ("my_query",)
query_param_name: ClassVar[str] = "my_query"
@pytest.mark.unit
@mock.patch.object(sa, "text")
@mock.patch.object(
QueryMetricProvider, "_get_substituted_batch_subquery_from_query_and_batch_selectable"
)
def test_sqlalchemy_query_row_count(
mock_get_substituted_batch_subquery_from_query_and_batch_selectable,
mock_sqlalchemy_text,
mock_sqlalchemy_execution_engine: MockSqlAlchemyExecutionEngine,
batch_selectable: sa.Table,
):
metric_value_kwargs = {
"query_param": "my_query",
"my_query": "SELECT * FROM {batch} WHERE passenger_count > 7",
}
mock_substituted_batch_subquery = "SELECT * FROM (my_table) WHERE passenger_count > 7"
mock_get_substituted_batch_subquery_from_query_and_batch_selectable.return_value = (
mock_substituted_batch_subquery
)
mock_sqlalchemy_text.return_value = "*"
with mock.patch.object(mock_sqlalchemy_execution_engine, "execute_query"):
MyQueryRowCount._sqlalchemy(
cls=MyQueryRowCount,
execution_engine=mock_sqlalchemy_execution_engine,
metric_domain_kwargs={},
metric_value_kwargs=metric_value_kwargs,
metrics={},
runtime_configuration={},
)
mock_get_substituted_batch_subquery_from_query_and_batch_selectable.assert_called_once_with(
query=metric_value_kwargs["my_query"],
batch_selectable=batch_selectable,
execution_engine=mock_sqlalchemy_execution_engine,
)
| MyQueryRowCount |
python | kamyu104__LeetCode-Solutions | Python/minimum-impossible-or.py | {
"start": 64,
"end": 294
} | class ____(object):
def minImpossibleOR(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lookup = set(nums)
return next(1<<i for i in xrange(31) if 1<<i not in lookup)
| Solution |
python | viewflow__viewflow | viewflow/workflow/flow/views/detail.py | {
"start": 453,
"end": 1387
} | class ____(generic.RedirectView):
"""Redirect for a flow.View node."""
def get_redirect_url(self, *args, **kwargs):
activation = self.request.activation
task = activation.task
flow_task = activation.flow_task
if activation.start.can_proceed() and flow_task.can_execute(
self.request.user, task
):
return flow_task.reverse("execute", args=[task.process_id, task.pk])
if activation.assign.can_proceed() and flow_task.can_assign(
self.request.user, task
):
return flow_task.reverse("assign", args=[task.process_id, task.pk])
if flow_task.can_view(self.request.user, task):
return flow_task.reverse("detail", args=[task.process_id, task.pk])
messages.success(
self.request, _("You have no rights to view this task"), fail_silently=True
)
return "/"
| UserIndexTaskView |
python | django__django | tests/m2m_multiple/models.py | {
"start": 301,
"end": 471
} | class ____(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
| Category |
python | kamyu104__LeetCode-Solutions | Python/minimize-the-difference-between-target-and-chosen-elements.py | {
"start": 50,
"end": 540
} | class ____(object):
def minimizeTheDifference(self, mat, target):
"""
:type mat: List[List[int]]
:type target: int
:rtype: int
"""
chosen_min = sum(min(row) for row in mat)
if chosen_min >= target:
return chosen_min-target
dp = {0}
for row in mat:
dp = {total+x for total in dp for x in row if (total+x)-target < target-chosen_min}
return min(abs(target-total) for total in dp)
| Solution |
python | scikit-image__scikit-image | benchmarks/benchmark_rank.py | {
"start": 225,
"end": 612
} | class ____:
param_names = ["filter_func", "shape"]
params = [sorted(all_rank_filters), [(32, 32), (256, 256)]]
def setup(self, filter_func, shape):
self.image = np.random.randint(0, 255, size=shape, dtype=np.uint8)
self.footprint = disk(1)
def time_filter(self, filter_func, shape):
getattr(rank, filter_func)(self.image, self.footprint)
| RankSuite |
python | Netflix__metaflow | metaflow/_vendor/click/exceptions.py | {
"start": 6930,
"end": 7282
} | class ____(UsageError):
"""Raised if an argument is generally supplied but the use of the argument
was incorrect. This is for instance raised if the number of values
for an argument is not correct.
.. versionadded:: 6.0
"""
def __init__(self, message, ctx=None):
UsageError.__init__(self, message, ctx)
| BadArgumentUsage |
python | scrapy__scrapy | scrapy/spidermiddlewares/urllength.py | {
"start": 510,
"end": 1494
} | class ____(BaseSpiderMiddleware):
crawler: Crawler
def __init__(self, maxlength: int): # pylint: disable=super-init-not-called
self.maxlength: int = maxlength
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
maxlength = crawler.settings.getint("URLLENGTH_LIMIT")
if not maxlength:
raise NotConfigured
o = cls(maxlength)
o.crawler = crawler
return o
def get_processed_request(
self, request: Request, response: Response | None
) -> Request | None:
if len(request.url) <= self.maxlength:
return request
logger.info(
"Ignoring link (url length > %(maxlength)d): %(url)s ",
{"maxlength": self.maxlength, "url": request.url},
extra={"spider": self.crawler.spider},
)
assert self.crawler.stats
self.crawler.stats.inc_value("urllength/request_ignored_count")
return None
| UrlLengthMiddleware |
python | django-guardian__django-guardian | guardian/testapp/migrations/0008_fix_project_timezone.py | {
"start": 121,
"end": 453
} | class ____(migrations.Migration):
dependencies = [
("testapp", "0007_genericgroupobjectpermission"),
]
operations = [
migrations.AlterField(
model_name="project",
name="created_at",
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| Migration |
python | chroma-core__chroma | chromadb/utils/embedding_functions/roboflow_embedding_function.py | {
"start": 396,
"end": 5182
} | class ____(EmbeddingFunction[Embeddable]):
"""
This class is used to generate embeddings for a list of texts or images using the Roboflow API.
"""
def __init__(
self,
api_key: Optional[str] = None,
api_url: str = "https://infer.roboflow.com",
api_key_env_var: str = "CHROMA_ROBOFLOW_API_KEY",
) -> None:
"""
Create a RoboflowEmbeddingFunction.
Args:
api_key_env_var (str, optional): Environment variable name that contains your API key for the Roboflow API.
Defaults to "CHROMA_ROBOFLOW_API_KEY".
api_url (str, optional): The URL of the Roboflow API.
Defaults to "https://infer.roboflow.com".
"""
if api_key is not None:
warnings.warn(
"Direct api_key configuration will not be persisted. "
"Please use environment variables via api_key_env_var for persistent storage.",
DeprecationWarning,
)
if os.getenv("ROBOFLOW_API_KEY") is not None:
self.api_key_env_var = "ROBOFLOW_API_KEY"
else:
self.api_key_env_var = api_key_env_var
self.api_key = api_key or os.getenv(self.api_key_env_var)
if not self.api_key:
raise ValueError(
f"The {self.api_key_env_var} environment variable is not set."
)
self.api_url = api_url
try:
self._PILImage = importlib.import_module("PIL.Image")
except ImportError:
raise ValueError(
"The PIL python package is not installed. Please install it with `pip install pillow`"
)
self._httpx = importlib.import_module("httpx")
def __call__(self, input: Embeddable) -> Embeddings:
"""
Generate embeddings for the given documents or images.
Args:
input: Documents or images to generate embeddings for.
Returns:
Embeddings for the documents or images.
"""
embeddings = []
for item in input:
if is_image(item):
image = self._PILImage.fromarray(item)
buffer = BytesIO()
image.save(buffer, format="JPEG")
base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
infer_clip_payload_image = {
"image": {
"type": "base64",
"value": base64_image,
},
}
res = self._httpx.post(
f"{self.api_url}/clip/embed_image?api_key={self.api_key}",
json=infer_clip_payload_image,
)
result = res.json()["embeddings"]
embeddings.append(np.array(result[0], dtype=np.float32))
elif is_document(item):
infer_clip_payload_text = {
"text": item,
}
res = self._httpx.post(
f"{self.api_url}/clip/embed_text?api_key={self.api_key}",
json=infer_clip_payload_text,
)
result = res.json()["embeddings"]
embeddings.append(np.array(result[0], dtype=np.float32))
# Cast to the expected Embeddings type
return cast(Embeddings, embeddings)
@staticmethod
def name() -> str:
return "roboflow"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "EmbeddingFunction[Union[Documents, Images]]":
api_key_env_var = config.get("api_key_env_var")
api_url = config.get("api_url")
if api_key_env_var is None or api_url is None:
assert False, "This code should not be reached"
return RoboflowEmbeddingFunction(
api_key_env_var=api_key_env_var, api_url=api_url
)
def get_config(self) -> Dict[str, Any]:
return {"api_key_env_var": self.api_key_env_var, "api_url": self.api_url}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
# API URL can be changed, so no validation needed
pass
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "roboflow")
| RoboflowEmbeddingFunction |
python | pennersr__django-allauth | allauth/mfa/recovery_codes/views.py | {
"start": 3030,
"end": 3644
} | class ____(TemplateView):
template_name = "mfa/recovery_codes/index." + account_settings.TEMPLATE_EXTENSION
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
authenticator = flows.view_recovery_codes(self.request)
if not authenticator:
raise Http404()
ret.update(
{
"unused_codes": authenticator.wrap().get_unused_codes(),
"total_count": app_settings.RECOVERY_CODE_COUNT,
}
)
return ret
view_recovery_codes = ViewRecoveryCodesView.as_view()
| ViewRecoveryCodesView |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 64390,
"end": 65056
} | class ____(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module: _ModuleLike) -> None:
super().__init__(module)
self._setup_prefix()
def _setup_prefix(self):
# Assume that metadata may be nested inside a "basket"
# of multiple eggs and use module_path instead of .archive.
eggs = filter(_is_egg_path, _parents(self.module_path))
egg = next(eggs, None)
egg and self._set_egg(egg)
def _set_egg(self, path: str) -> None:
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
| EggProvider |
python | doocs__leetcode | solution/2600-2699/2603.Collect Coins in a Tree/Solution.py | {
"start": 0,
"end": 787
} | class ____:
def collectTheCoins(self, coins: List[int], edges: List[List[int]]) -> int:
g = defaultdict(set)
for a, b in edges:
g[a].add(b)
g[b].add(a)
n = len(coins)
q = deque(i for i in range(n) if len(g[i]) == 1 and coins[i] == 0)
while q:
i = q.popleft()
for j in g[i]:
g[j].remove(i)
if coins[j] == 0 and len(g[j]) == 1:
q.append(j)
g[i].clear()
for k in range(2):
q = [i for i in range(n) if len(g[i]) == 1]
for i in q:
for j in g[i]:
g[j].remove(i)
g[i].clear()
return sum(len(g[a]) > 0 and len(g[b]) > 0 for a, b in edges) * 2
| Solution |
python | pytorch__pytorch | torch/export/dynamic_shapes.py | {
"start": 26508,
"end": 29875
} | class ____:
"""
Builder for dynamic_shapes.
Used to assign dynamic shape specifications to tensors that appear in inputs.
This is useful particularly when :func:`args` is a nested input structure, and it's
easier to index the input tensors, than to replicate the structure of :func:`args` in
the :func:`dynamic_shapes` specification.
Example::
args = {"x": tensor_x, "others": [tensor_y, tensor_z]}
dim = torch.export.Dim(...)
dynamic_shapes = torch.export.ShapesCollection()
dynamic_shapes[tensor_x] = (dim, dim + 1, 8)
dynamic_shapes[tensor_y] = {0: dim * 2}
# This is equivalent to the following (now auto-generated):
# dynamic_shapes = {"x": (dim, dim + 1, 8), "others": [{0: dim * 2}, None]}
torch.export(..., args, dynamic_shapes=dynamic_shapes)
To specify dynamism for integers, we need to first wrap the integers using
_IntWrapper so that we have a "unique identification tag" for each integer.
Example::
args = {"x": tensor_x, "others": [int_x, int_y]}
# Wrap all ints with _IntWrapper
mapped_args = pytree.tree_map_only(int, lambda a: _IntWrapper(a), args)
dynamic_shapes = torch.export.ShapesCollection()
dynamic_shapes[tensor_x] = (dim, dim + 1, 8)
dynamic_shapes[mapped_args["others"][0]] = Dim.DYNAMIC
# This is equivalent to the following (now auto-generated):
# dynamic_shapes = {"x": (dim, dim + 1, 8), "others": [Dim.DYNAMIC, None]}
torch.export(..., args, dynamic_shapes=dynamic_shapes)
"""
def __init__(self):
self._shapes = {}
def __setitem__(self, t, shape):
assert isinstance(t, (torch.Tensor, _IntWrapper)), (
f"Cannot assign shape to non-tensor or non-_IntWrapper type {type(t)}"
)
# TODO(avik): check that shape is indeed a Shape
t_id = id(t)
if t_id in self._shapes:
_shape = self._shapes[t_id]
assert shape == _shape, (
f"Shapes assigned to input do not match: expected {_shape}, got {shape}"
)
else:
self._shapes[id(t)] = shape
def __getitem__(self, t):
t_id = id(t)
if t_id not in self._shapes:
self._shapes[t_id] = {}
return self._shapes[t_id]
def __len__(self):
return len(self._shapes)
def dynamic_shapes(self, m, args, kwargs=None):
"""
Generates the :func:`dynamic_shapes` pytree structure according to :func:`args` and :func:`kwargs`.
"""
t_ids = set()
def find_shape(path, t):
t_id = id(t)
if t_id in self._shapes:
t_ids.add(t_id)
return self._shapes[t_id]
else:
return None
combined_args = _combine_args(m, args, kwargs)
dynamic_shapes = _tree_map_with_path(find_shape, combined_args)
if any(t_id not in t_ids for t_id in self._shapes):
raise ValueError(
"Some tensors that were assigned shapes were not found in args. "
"Maybe such tensors were copied when passing them as args? "
"Maybe such tensors are contained in classes that were not registered with pytree?"
)
return dynamic_shapes
| ShapesCollection |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_polygon_area_between.py | {
"start": 1115,
"end": 2867
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.polygon_area"
condition_value_keys = (
"min_area",
"max_area",
"crs",
)
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, min_area, max_area, crs="epsg:4326", **kwargs):
# Convert from json/dict to polygon/multipolygon
column = column.apply(shape)
column = geopandas.GeoSeries(column)
# Set crs so geopandas knows how the data is represented
column = column.set_crs(crs)
# Convert from current representation to an equal area representation
column = column.to_crs({"proj": "cea"})
# Divide to get area in squared kilometers
column_array = column.area / 10**6
return (column_array >= min_area) & (column_array <= max_area)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesPolygonArea |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.