language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/maximize-score-after-pair-deletions.py | {
"start": 38,
"end": 283
} | class ____(object):
def maxScore(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(nums)-min(nums) if len(nums)%2 else sum(nums)-min(nums[i]+nums[i+1] for i in xrange(len(nums)-1))
| Solution |
python | huggingface__transformers | src/transformers/models/parakeet/configuration_parakeet.py | {
"start": 6987,
"end": 10429
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ParakeetForCTC`]. It is used to instantiate a
Parakeet CTC model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 1025):
Vocabulary size of the model.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`ParakeetForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `True`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`ParakeetForCTC`].
encoder_config (`Union[dict, ParakeetEncoderConfig]`, *optional*):
The config object or dictionary of the encoder.
pad_token_id (`int`, *optional*, defaults to 1024):
Padding token id. Also used as blank token id.
Example:
```python
>>> from transformers import ParakeetForCTC, ParakeetCTCConfig
>>> # Initializing a Parakeet configuration
>>> configuration = ParakeetCTCConfig()
>>> # Initializing a model from the configuration
>>> model = ParakeetForCTC(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
This configuration class is based on the Parakeet CTC architecture from NVIDIA NeMo. You can find more details
and pre-trained models at [nvidia/parakeet-ctc-1.1b](https://huggingface.co/nvidia/parakeet-ctc-1.1b).
"""
model_type = "parakeet_ctc"
sub_configs = {"encoder_config": ParakeetEncoderConfig}
def __init__(
self,
vocab_size=1025,
ctc_loss_reduction="mean",
ctc_zero_infinity=True,
encoder_config: Union[dict, ParakeetEncoderConfig] = None,
pad_token_id=1024,
**kwargs,
):
self.vocab_size = vocab_size
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
if isinstance(encoder_config, dict):
self.encoder_config = ParakeetEncoderConfig(**encoder_config)
elif encoder_config is None:
self.encoder_config = ParakeetEncoderConfig()
self.encoder_config = self.encoder_config
self.initializer_range = self.encoder_config.initializer_range
super().__init__(
pad_token_id=pad_token_id,
**kwargs,
)
@classmethod
def from_encoder_config(cls, encoder_config: ParakeetEncoderConfig, **kwargs):
r"""
Instantiate a [`ParakeetCTCConfig`] (or a derived class) from parakeet encoder model configuration.
Returns:
[`ParakeetCTCConfig`]: An instance of a configuration object
"""
return cls(encoder_config=encoder_config.to_dict(), **kwargs)
__all__ = ["ParakeetCTCConfig", "ParakeetEncoderConfig"]
| ParakeetCTCConfig |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 47063,
"end": 53129
} | class ____:
all_transitions = set(product(ALL_ORCHESTRATION_STATES, CANONICAL_STATES))
terminal_transitions = set(product(TERMINAL_STATES, ALL_ORCHESTRATION_STATES))
# Cast to sorted lists for deterministic ordering.
# Sort as strings to handle `None`.
active_transitions = list(
sorted(all_transitions - terminal_transitions, key=lambda item: str(item))
)
all_transitions = list(sorted(all_transitions, key=lambda item: str(item)))
terminal_transitions = list(
sorted(terminal_transitions, key=lambda item: str(item))
)
@pytest.mark.parametrize(
"intended_transition",
[(terminal_state, StateType.CRASHED) for terminal_state in TERMINAL_STATES],
ids=transition_names,
)
async def test_transitions_from_terminal_states_to_crashed_are_aborted(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
)
if run_type == "task":
protection_rule = HandleTaskTerminalStateTransitions
elif run_type == "flow":
protection_rule = HandleFlowTerminalStateTransitions
state_protection = protection_rule(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ABORT
@pytest.mark.parametrize(
"intended_transition",
[(terminal_state, StateType.CANCELLING) for terminal_state in TERMINAL_STATES],
ids=transition_names,
)
async def test_transitions_from_terminal_states_to_cancelling_are_aborted(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
)
if run_type == "task":
protection_rule = HandleTaskTerminalStateTransitions
elif run_type == "flow":
protection_rule = HandleFlowTerminalStateTransitions
state_protection = protection_rule(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ABORT
@pytest.mark.parametrize(
"intended_transition",
[
(StateType.COMPLETED, state)
for state in ALL_ORCHESTRATION_STATES
if state
and state not in TERMINAL_STATES
and state
not in {
StateType.CANCELLING,
StateType.PAUSED,
}
],
ids=transition_names,
)
async def test_transitions_from_completed_to_non_final_states_allowed_without_persisted_result(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
if run_type == "flow" and intended_transition[1] == StateType.SCHEDULED:
pytest.skip(
"Flow runs cannot transition back to a SCHEDULED state without a"
" deployment"
)
ctx = await initialize_orchestration(
session, run_type, *intended_transition, initial_state_data=None
)
if run_type == "task":
protection_rule = HandleTaskTerminalStateTransitions
elif run_type == "flow":
protection_rule = HandleFlowTerminalStateTransitions
state_protection = protection_rule(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
@pytest.mark.parametrize(
"intended_transition",
[
(StateType.COMPLETED, state)
for state in ALL_ORCHESTRATION_STATES
if state
and state not in TERMINAL_STATES
and state
not in {
StateType.CANCELLING,
StateType.PAUSED,
}
],
ids=transition_names,
)
async def test_transitions_from_completed_to_non_final_states_rejected_with_persisted_result(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_state_data=ResultRecordMetadata.model_construct().model_dump(),
)
if run_type == "task":
protection_rule = HandleTaskTerminalStateTransitions
elif run_type == "flow":
protection_rule = HandleFlowTerminalStateTransitions
state_protection = protection_rule(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.REJECT, ctx.response_details
@pytest.mark.parametrize(
"intended_transition", active_transitions, ids=transition_names
)
async def test_does_not_block_transitions_from_non_terminal_states(
self,
session,
run_type,
initialize_orchestration,
intended_transition,
):
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
)
if run_type == "task":
protection_rule = HandleTaskTerminalStateTransitions
elif run_type == "flow":
protection_rule = HandleFlowTerminalStateTransitions
state_protection = protection_rule(ctx, *intended_transition)
async with state_protection as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.ACCEPT
@pytest.mark.parametrize("run_type", ["flow"])
| TestTransitionsFromTerminalStatesRule |
python | ray-project__ray | rllib/models/torch/torch_action_dist.py | {
"start": 3413,
"end": 7038
} | class ____(TorchDistributionWrapper):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
@override(TorchDistributionWrapper)
def __init__(
self,
inputs: List[TensorType],
model: TorchModelV2,
input_lens: Union[List[int], np.ndarray, Tuple[int, ...]],
action_space=None,
):
super().__init__(inputs, model)
# If input_lens is np.ndarray or list, force-make it a tuple.
inputs_split = self.inputs.split(tuple(input_lens), dim=1)
self.cats = [
torch.distributions.categorical.Categorical(logits=input_)
for input_ in inputs_split
]
# Used in case we are dealing with an Int Box.
self.action_space = action_space
@override(TorchDistributionWrapper)
def sample(self) -> TensorType:
arr = [cat.sample() for cat in self.cats]
sample_ = torch.stack(arr, dim=1)
if isinstance(self.action_space, gym.spaces.Box):
sample_ = torch.reshape(sample_, [-1] + list(self.action_space.shape))
self.last_sample = sample_
return sample_
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
arr = [torch.argmax(cat.probs, -1) for cat in self.cats]
sample_ = torch.stack(arr, dim=1)
if isinstance(self.action_space, gym.spaces.Box):
sample_ = torch.reshape(sample_, [-1] + list(self.action_space.shape))
self.last_sample = sample_
return sample_
@override(TorchDistributionWrapper)
def logp(self, actions: TensorType) -> TensorType:
# # If tensor is provided, unstack it into list.
if isinstance(actions, torch.Tensor):
if isinstance(self.action_space, gym.spaces.Box):
actions = torch.reshape(
actions, [-1, int(np.prod(self.action_space.shape))]
)
actions = torch.unbind(actions, dim=1)
logps = torch.stack([cat.log_prob(act) for cat, act in zip(self.cats, actions)])
return torch.sum(logps, dim=0)
@override(ActionDistribution)
def multi_entropy(self) -> TensorType:
return torch.stack([cat.entropy() for cat in self.cats], dim=1)
@override(TorchDistributionWrapper)
def entropy(self) -> TensorType:
return torch.sum(self.multi_entropy(), dim=1)
@override(ActionDistribution)
def multi_kl(self, other: ActionDistribution) -> TensorType:
return torch.stack(
[
torch.distributions.kl.kl_divergence(cat, oth_cat)
for cat, oth_cat in zip(self.cats, other.cats)
],
dim=1,
)
@override(TorchDistributionWrapper)
def kl(self, other: ActionDistribution) -> TensorType:
return torch.sum(self.multi_kl(other), dim=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
# Int Box.
if isinstance(action_space, gym.spaces.Box):
assert action_space.dtype.name.startswith("int")
low_ = np.min(action_space.low)
high_ = np.max(action_space.high)
assert np.all(action_space.low == low_)
assert np.all(action_space.high == high_)
return np.prod(action_space.shape, dtype=np.int32) * (high_ - low_ + 1)
# MultiDiscrete space.
else:
# `nvec` is already integer. No need to cast.
return np.sum(action_space.nvec)
@OldAPIStack
| TorchMultiCategorical |
python | django__django | tests/model_fields/test_uuid.py | {
"start": 3246,
"end": 7966
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
NullableUUIDModel.objects.create(
field=uuid.UUID("25d405be-4895-4d50-9b2e-d6695359ce47"),
),
NullableUUIDModel.objects.create(field="550e8400e29b41d4a716446655440000"),
NullableUUIDModel.objects.create(field=None),
]
def assertSequenceEqualWithoutHyphens(self, qs, result):
"""
Backends with a native datatype for UUID don't support fragment lookups
without hyphens because they store values with them.
"""
self.assertSequenceEqual(
qs,
[] if connection.features.has_native_uuid_field else result,
)
def test_exact(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(
field__exact="550e8400e29b41d4a716446655440000"
),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(
field__exact="550e8400-e29b-41d4-a716-446655440000"
),
[self.objs[1]],
)
def test_iexact(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(
field__iexact="550E8400E29B41D4A716446655440000"
),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(
field__iexact="550E8400-E29B-41D4-A716-446655440000"
),
[self.objs[1]],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__isnull=True), [self.objs[2]]
)
def test_contains(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__contains="8400e29b"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__contains="8400-e29b"),
[self.objs[1]],
)
def test_icontains(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__icontains="8400E29B"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__icontains="8400-E29B"),
[self.objs[1]],
)
def test_startswith(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__startswith="550e8400e29b4"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__startswith="550e8400-e29b-4"),
[self.objs[1]],
)
def test_istartswith(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__istartswith="550E8400E29B4"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__istartswith="550E8400-E29B-4"),
[self.objs[1]],
)
def test_endswith(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__endswith="a716446655440000"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__endswith="a716-446655440000"),
[self.objs[1]],
)
def test_iendswith(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.filter(field__iendswith="A716446655440000"),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.filter(field__iendswith="A716-446655440000"),
[self.objs[1]],
)
def test_filter_with_expr(self):
self.assertSequenceEqualWithoutHyphens(
NullableUUIDModel.objects.annotate(
value=Concat(Value("8400"), Value("e29b"), output_field=CharField()),
).filter(field__contains=F("value")),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.annotate(
value=Concat(
Value("8400"), Value("-"), Value("e29b"), output_field=CharField()
),
).filter(field__contains=F("value")),
[self.objs[1]],
)
self.assertSequenceEqual(
NullableUUIDModel.objects.annotate(
value=Repeat(Value("0"), 4, output_field=CharField()),
).filter(field__contains=F("value")),
[self.objs[1]],
)
| TestQuerying |
python | astropy__astropy | astropy/coordinates/tests/test_shape_manipulation.py | {
"start": 552,
"end": 2657
} | class ____:
@classmethod
def setup_class(cls):
# For these tests, we set up frames and coordinates using copy=False,
# so we can check that broadcasting is handled correctly.
lon = Longitude(np.arange(0, 24, 4), u.hourangle)
lat = Latitude(np.arange(-90, 91, 30), u.deg)
# With same-sized arrays, no attributes.
cls.s0 = ICRS(
lon[:, np.newaxis] * np.ones(lat.shape),
lat * np.ones(lon.shape)[:, np.newaxis],
copy=False,
)
# Make an AltAz frame since that has many types of attributes.
# Match one axis with times.
cls.obstime = Time("2012-01-01") + np.arange(len(lon))[:, np.newaxis] * u.s
# And another with location.
cls.location = EarthLocation(20.0 * u.deg, lat, 100 * u.m)
# Ensure we have a quantity scalar.
cls.pressure = 1000 * u.hPa
# As well as an array.
cls.temperature = (
np.random.uniform(0.0, 20.0, size=(lon.size, lat.size)) * u.deg_C
)
cls.s1 = AltAz(
az=lon[:, np.newaxis],
alt=lat,
obstime=cls.obstime,
location=cls.location,
pressure=cls.pressure,
temperature=cls.temperature,
copy=False,
)
# For some tests, also try a GCRS, since that has representation
# attributes. We match the second dimension (via the location)
cls.obsgeoloc, cls.obsgeovel = cls.location.get_gcrs_posvel(cls.obstime[0, 0])
cls.s2 = GCRS(
ra=lon[:, np.newaxis],
dec=lat,
obstime=cls.obstime,
obsgeoloc=cls.obsgeoloc,
obsgeovel=cls.obsgeovel,
copy=False,
)
# For completeness, also some tests on an empty frame.
cls.s3 = GCRS(
obstime=cls.obstime,
obsgeoloc=cls.obsgeoloc,
obsgeovel=cls.obsgeovel,
copy=False,
)
# And make a SkyCoord
cls.sc = SkyCoord(ra=lon[:, np.newaxis], dec=lat, frame=cls.s3, copy=False)
| ShapeSetup |
python | django__django | tests/template_tests/test_nodelist.py | {
"start": 918,
"end": 1353
} | class ____(SimpleTestCase):
def test_textnode_repr(self):
engine = Engine()
for temptext, reprtext in [
("Hello, world!", "<TextNode: 'Hello, world!'>"),
("One\ntwo.", "<TextNode: 'One\\ntwo.'>"),
]:
template = engine.from_string(temptext)
texts = template.nodelist.get_nodes_by_type(TextNode)
self.assertEqual(repr(texts[0]), reprtext)
| TextNodeTest |
python | keon__algorithms | tests/test_maths.py | {
"start": 788,
"end": 1390
} | class ____(unittest.TestCase):
"""
Test for the file power.py
Arguments:
unittest {[type]} -- [description]
"""
def test_power(self):
self.assertEqual(8, power(2, 3))
self.assertEqual(1, power(5, 0))
self.assertEqual(0, power(10, 3, 5))
self.assertEqual(280380, power(2265, 1664, 465465))
def test_power_recur(self):
self.assertEqual(8, power_recur(2, 3))
self.assertEqual(1, power_recur(5, 0))
self.assertEqual(0, power_recur(10, 3, 5))
self.assertEqual(280380, power_recur(2265, 1664, 465465))
| TestPower |
python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/sbert_rerank.py | {
"start": 420,
"end": 3577
} | class ____(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
trust_remote_code: bool = Field(
default=False,
description="Whether to trust remote code.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: bool = False,
trust_remote_code: bool = True,
):
try:
from sentence_transformers import CrossEncoder # pants: no-infer-dep
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
device = infer_torch_device() if device is None else device
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
self._model = CrossEncoder(
model,
max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH,
device=device,
trust_remote_code=trust_remote_code,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = score
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
return new_nodes
| SentenceTransformerRerank |
python | langchain-ai__langchain | libs/core/langchain_core/prompts/chat.py | {
"start": 22479,
"end": 22702
} | class ____(_StringImageMessagePromptTemplate):
"""System message prompt template.
This is a message that is not sent to the user.
"""
_msg_class: type[BaseMessage] = SystemMessage
| SystemMessagePromptTemplate |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 31884,
"end": 32133
} | class ____(models.Model):
username = models.CharField(max_length=20)
password = models.CharField(max_length=100)
# Three different styles of serializer.
# All should allow ordering by username, but not by password.
| SensitiveOrderingFilterModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 1795,
"end": 1851
} | class ____(ParentClosed4):
b: list[str]
| ChildClosed4_7 |
python | scipy__scipy | scipy/optimize/_optimize.py | {
"start": 107719,
"end": 140160
} | class ____(RuntimeError):
pass
def _recover_from_bracket_error(solver, fun, bracket, args, **options):
# `bracket` was originally written without checking whether the resulting
# bracket is valid. `brent` and `golden` built on top of it without
# checking the returned bracket for validity, and their output can be
# incorrect without warning/error if the original bracket is invalid.
# gh-14858 noticed the problem, and the following is the desired
# behavior:
# - `scipy.optimize.bracket`, `scipy.optimize.brent`, and
# `scipy.optimize.golden` should raise an error if the bracket is
# invalid, as opposed to silently returning garbage
# - `scipy.optimize.minimize_scalar` should return with `success=False`
# and other information
# The changes that would be required to achieve this the traditional
# way (`return`ing all the required information from bracket all the way
# up to `minimizer_scalar`) are extensive and invasive. (See a6aa40d.)
# We can achieve the same thing by raising the error in `bracket`, but
# storing the information needed by `minimize_scalar` in the error object,
# and intercepting it here.
try:
res = solver(fun, bracket, args, **options)
except BracketError as e:
msg = str(e)
xa, xb, xc, fa, fb, fc, funcalls = e.data
xs, fs = [xa, xb, xc], [fa, fb, fc]
if np.any(np.isnan([xs, fs])):
x, fun = np.nan, np.nan
else:
imin = np.argmin(fs)
x, fun = xs[imin], fs[imin]
return OptimizeResult(fun=fun, nfev=funcalls, x=x,
nit=0, success=False, message=msg)
return res
def _line_for_search(x0, alpha, lower_bound, upper_bound):
"""
Given a parameter vector ``x0`` with length ``n`` and a direction
vector ``alpha`` with length ``n``, and lower and upper bounds on
each of the ``n`` parameters, what are the bounds on a scalar
``l`` such that ``lower_bound <= x0 + alpha * l <= upper_bound``.
Parameters
----------
x0 : np.array.
The vector representing the current location.
Note ``np.shape(x0) == (n,)``.
alpha : np.array.
The vector representing the direction.
Note ``np.shape(alpha) == (n,)``.
lower_bound : np.array.
The lower bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
should be ``-np.inf``.
Note ``np.shape(lower_bound) == (n,)``.
upper_bound : np.array.
The upper bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
should be ``np.inf``.
Note ``np.shape(upper_bound) == (n,)``.
Returns
-------
res : tuple ``(lmin, lmax)``
The bounds for ``l`` such that
``lower_bound[i] <= x0[i] + alpha[i] * l <= upper_bound[i]``
for all ``i``.
"""
# get nonzero indices of alpha so we don't get any zero division errors.
# alpha will not be all zero, since it is called from _linesearch_powell
# where we have a check for this.
nonzero, = alpha.nonzero()
lower_bound, upper_bound = lower_bound[nonzero], upper_bound[nonzero]
x0, alpha = x0[nonzero], alpha[nonzero]
low = (lower_bound - x0) / alpha
high = (upper_bound - x0) / alpha
# positive and negative indices
pos = alpha > 0
lmin_pos = np.where(pos, low, 0)
lmin_neg = np.where(pos, 0, high)
lmax_pos = np.where(pos, high, 0)
lmax_neg = np.where(pos, 0, low)
lmin = np.max(lmin_pos + lmin_neg)
lmax = np.min(lmax_pos + lmax_neg)
# if x0 is outside the bounds, then it is possible that there is
# no way to get back in the bounds for the parameters being updated
# with the current direction alpha.
# when this happens, lmax < lmin.
# If this is the case, then we can just return (0, 0)
return (lmin, lmax) if lmax >= lmin else (0, 0)
def _linesearch_powell(func, p, xi, tol=1e-3,
lower_bound=None, upper_bound=None, fval=None):
"""Line-search algorithm using fminbound.
Find the minimum of the function ``func(x0 + alpha*direc)``.
lower_bound : np.array.
The lower bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded below, then ``lower_bound[i]``
should be ``-np.inf``.
Note ``np.shape(lower_bound) == (n,)``.
upper_bound : np.array.
The upper bounds for each parameter in ``x0``. If the ``i``th
parameter in ``x0`` is unbounded above, then ``upper_bound[i]``
should be ``np.inf``.
Note ``np.shape(upper_bound) == (n,)``.
fval : number.
``fval`` is equal to ``func(p)``, the idea is just to avoid
recomputing it so we can limit the ``fevals``.
"""
def myfunc(alpha):
return func(p + alpha*xi)
# if xi is zero, then don't optimize
if not np.any(xi):
return ((fval, p, xi) if fval is not None else (func(p), p, xi))
elif lower_bound is None and upper_bound is None:
# non-bounded minimization
res = _recover_from_bracket_error(_minimize_scalar_brent,
myfunc, None, tuple(), xtol=tol)
alpha_min, fret = res.x, res.fun
xi = alpha_min * xi
return fret, p + xi, xi
else:
bound = _line_for_search(p, xi, lower_bound, upper_bound)
if np.isneginf(bound[0]) and np.isposinf(bound[1]):
# equivalent to unbounded
return _linesearch_powell(func, p, xi, fval=fval, tol=tol)
elif not np.isneginf(bound[0]) and not np.isposinf(bound[1]):
# we can use a bounded scalar minimization
res = _minimize_scalar_bounded(myfunc, bound, xatol=tol / 100)
xi = res.x * xi
return res.fun, p + xi, xi
else:
# only bounded on one side. use the tangent function to convert
# the infinity bound to a finite bound. The new bounded region
# is a subregion of the region bounded by -np.pi/2 and np.pi/2.
bound = np.arctan(bound[0]), np.arctan(bound[1])
res = _minimize_scalar_bounded(
lambda x: myfunc(np.tan(x)),
bound,
xatol=tol / 100)
xi = np.tan(res.x) * xi
return res.fun, p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method.
This method only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, ``fopt``, ``xi``, ``direc``, ``iter``, ``funcalls``, and
``warnflag`` are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial fitting step and parameter order set as an (N, N) array, where N
is the number of fitting parameters in `x0`. Defaults to step size 1.0
fitting all parameters simultaneously (``np.eye((N, N))``). To
prevent initial consideration of values in a step or to change initial
step size, set to 0 or desired step size in the Jth position in the Mth
block, where J is the position in `x0` and M is the desired evaluation
step, with steps being evaluated in index order. Step size and ordering
will change freely as minimization proceeds.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
3 : NaN result encountered.
4 : The result is out of the provided bounds.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' method in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop merely iterates over the inner
loop. The inner loop minimizes over each current direction in the direction
set. At the end of the inner loop, if certain conditions are met, the
direction that gave the largest decrease is dropped and replaced with the
difference between the current estimated x and the estimated x from the
beginning of the inner-loop.
The technical conditions for replacing the direction of greatest
increase amount to checking that
1. No further gain can be made along the direction of greatest increase
from that iteration.
2. The direction of greatest increase accounted for a large sufficient
fraction of the decrease in the function value from that iteration of
the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin_powell(f, -1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 2
Function evaluations: 16
>>> minimum
array(0.0)
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
callback = _wrap_callback(callback)
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_powell(func, x0, args=(), callback=None, bounds=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.
    Parameters
    ----------
    fun : callable
        The objective function to be minimized::
            fun(x, *args) -> float
        where ``x`` is a 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function.
    x0 : ndarray, shape (n,)
        Initial guess. Array of real elements of size (n,),
        where ``n`` is the number of independent variables.
    args : tuple, optional
        Extra arguments passed to the objective function and its
        derivatives (`fun`, `jac` and `hess` functions).
    method : str or callable, optional
        The present documentation is specific to ``method='powell'``, but other
        options are available. See documentation for `scipy.optimize.minimize`.
    bounds : sequence or `Bounds`, optional
        Bounds on decision variables. There are two ways to specify the bounds:
            1. Instance of `Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.
        If bounds are not provided, then an unbounded line search will be used.
        If bounds are provided and the initial guess is within the bounds, then
        every function evaluation throughout the minimization procedure will be
        within the bounds. If bounds are provided, the initial guess is outside
        the bounds, and `direc` is full rank (or left to default), then some
        function evaluations during the first iteration may be outside the
        bounds, but every function evaluation after the first iteration will be
        within the bounds. If `direc` is not full rank, then some parameters
        may not be optimized and the solution is not guaranteed to be within
        the bounds.
    options : dict, optional
        A dictionary of solver options. All methods accept the following
        generic options:
            maxiter : int
                Maximum number of iterations to perform. Depending on the
                method each iteration may use several function evaluations.
            disp : bool
                Set to True to print convergence messages.
        See method-specific options for ``method='powell'`` below.
    callback : callable, optional
        Called after each iteration. The signature is::
            callback(xk)
        where ``xk`` is the current parameter vector.
    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*1000``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    direc : ndarray
        Initial set of direction vectors for the Powell method.
    return_all : bool, optional
        Set to True to return a list of the best solution at each of the
        iterations.
    """
    _check_unknown_options(unknown_options)
    # Internal aliases: `maxfun` counts function evaluations, `retall`
    # controls whether every intermediate iterate is recorded.
    maxfun = maxfev
    retall = return_all
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    # If neither are set, then set both to default
    if maxiter is None and maxfun is None:
        maxiter = N * 1000
        maxfun = N * 1000
    elif maxiter is None:
        # Convert remaining Nones, to np.inf, unless the other is np.inf, in
        # which case use the default to avoid unbounded iteration
        if maxfun == np.inf:
            maxiter = N * 1000
        else:
            maxiter = np.inf
    elif maxfun is None:
        if maxiter == np.inf:
            maxfun = N * 1000
        else:
            maxfun = np.inf
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
    # The direction set defaults to the coordinate axes. A user-supplied
    # rank-deficient set cannot span the search space, so warn about it.
    if direc is None:
        direc = eye(N, dtype=float)
    else:
        direc = asarray(direc, dtype=float)
        if np.linalg.matrix_rank(direc) != direc.shape[0]:
            warnings.warn("direc input is not full rank, some parameters may "
                          "not be optimized",
                          OptimizeWarning, stacklevel=3)
    if bounds is None:
        # don't make these arrays of all +/- inf. because
        # _linesearch_powell will do an unnecessary check of all the elements.
        # just keep them None, _linesearch_powell will not have to check
        # all the elements.
        lower_bound, upper_bound = None, None
    else:
        # bounds is standardized in _minimize.py.
        lower_bound, upper_bound = bounds.lb, bounds.ub
        if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
            warnings.warn("Initial guess is not within the specified bounds",
                          OptimizeWarning, stacklevel=3)
    fval = func(x)
    # `x1` remembers the iterate at the start of each outer sweep so the
    # extrapolation direction x - x1 can be formed further below.
    x1 = x.copy()
    iter = 0
    while True:
        try:
            fx = fval
            bigind = 0
            delta = 0.0
            # One Powell sweep: line-search along every direction in the
            # current set, tracking which direction gave the largest
            # decrease (`bigind`, `delta`) for the replacement step below.
            for i in range(N):
                direc1 = direc[i]
                fx2 = fval
                fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                     tol=xtol * 100,
                                                     lower_bound=lower_bound,
                                                     upper_bound=upper_bound,
                                                     fval=fval)
                if (fx2 - fval) > delta:
                    delta = fx2 - fval
                    bigind = i
            iter += 1
            if retall:
                allvecs.append(x)
            intermediate_result = OptimizeResult(x=x, fun=fval)
            if _call_callback_maybe_halt(callback, intermediate_result):
                break
            # Converged when the decrease achieved by the whole sweep falls
            # within the relative tolerance (1e-20 guards a zero denominator).
            bnd = ftol * (np.abs(fx) + np.abs(fval)) + 1e-20
            if 2.0 * (fx - fval) <= bnd:
                break
            if fcalls[0] >= maxfun:
                break
            if iter >= maxiter:
                break
            if np.isnan(fx) and np.isnan(fval):
                # Ended up in a nan-region: bail out
                break
            # Construct the extrapolated point
            direc1 = x - x1
            x1 = x.copy()
            # make sure that we don't go outside the bounds when extrapolating
            if lower_bound is None and upper_bound is None:
                lmax = 1
            else:
                _, lmax = _line_for_search(x, direc1, lower_bound, upper_bound)
            x2 = x + min(lmax, 1) * direc1
            fx2 = func(x2)
            # Accept the averaged direction only if it passes Powell's
            # acceptance criterion (t < 0); it then replaces the direction
            # that produced the biggest decrease during this sweep.
            if (fx > fx2):
                t = 2.0*(fx + fx2 - 2.0*fval)
                temp = (fx - fval - delta)
                t *= temp*temp
                temp = fx - fx2
                t -= delta*temp*temp
                if t < 0.0:
                    fval, x, direc1 = _linesearch_powell(
                        func, x, direc1,
                        tol=xtol * 100,
                        lower_bound=lower_bound,
                        upper_bound=upper_bound,
                        fval=fval
                    )
                    if np.any(direc1):
                        direc[bigind] = direc[-1]
                        direc[-1] = direc1
        except _MaxFuncCallError:
            break
    warnflag = 0
    msg = _status_message['success']
    # out of bounds is more urgent than exceeding function evals or iters,
    # but I don't want to cause inconsistencies by changing the
    # established warning flags for maxfev and maxiter, so the out of bounds
    # warning flag becomes 3, but is checked for first.
    if bounds and (np.any(lower_bound > x) or np.any(x > upper_bound)):
        warnflag = 4
        msg = _status_message['out_of_bounds']
    elif fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
    elif iter >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
    elif np.isnan(fval) or np.isnan(x).any():
        warnflag = 3
        msg = _status_message['nan']
    if disp:
        _print_success_message_or_warn(warnflag, msg, RuntimeWarning)
        print(f"         Current function value: {fval:f}")
        print(f"         Iterations: {iter:d}")
        print(f"         Function evaluations: {fcalls[0]:d}")
    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
return
if flag == 1:
msg = ("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
elif flag == 2:
msg = f"\n{_status_message['nan']}"
_print_success_message_or_warn(flag, msg)
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
          disp=False, workers=1):
    """Minimize a function over a given range by brute force.
    Uses the "brute force" method, i.e., computes the function's value
    at each point of a multidimensional grid of points, to find the global
    minimum of the function.
    The function is evaluated everywhere in the range with the datatype of the
    first call to the function, as enforced by the ``vectorize`` NumPy
    function. The value and type of the function evaluation returned when
    ``full_output=True`` are affected in addition by the ``finish`` argument
    (see Notes).
    The brute force approach is inefficient because the number of grid points
    increases exponentially - the number of grid points to evaluate is
    ``Ns ** len(x)``. Consequently, even with coarse grid spacing, even
    moderately sized problems can take a long time to run, and/or run into
    memory limitations.
    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the
        form ``f(x, *args)``, where ``x`` is the argument in
        the form of a 1-D array and ``args`` is a tuple of any
        additional fixed parameters needed to completely specify
        the function.
    ranges : tuple
        Each component of the `ranges` tuple must be either a
        "slice object" or a range tuple of the form ``(low, high)``.
        The program uses these to create the grid of points on which
        the objective function will be computed. See `Note 2` for
        more detail.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify
        the function.
    Ns : int, optional
        Number of grid points along the axes, if not otherwise
        specified. See `Note2`.
    full_output : bool, optional
        If True, return the evaluation grid and the objective function's
        values on it.
    finish : callable, optional
        An optimization function that is called with the result of brute force
        minimization as initial guess. `finish` should take `func` and
        the initial guess as positional arguments, and take `args` as
        keyword arguments. It may additionally take `full_output`
        and/or `disp` as keyword arguments. Use None if no "polishing"
        function is to be used. See Notes for more details.
    disp : bool, optional
        Set to True to print convergence messages from the `finish` callable.
    workers : int or map-like callable, optional
        If `workers` is an int the grid is subdivided into `workers`
        sections and evaluated in parallel (uses
        `multiprocessing.Pool <multiprocessing>`).
        Supply `-1` to use all cores available to the Process.
        Alternatively supply a map-like callable, such as
        `multiprocessing.Pool.map` for evaluating the grid in parallel.
        This evaluation is carried out as ``workers(func, iterable)``.
        Requires that `func` be pickleable.
        .. versionadded:: 1.3.0
    Returns
    -------
    x0 : ndarray
        A 1-D array containing the coordinates of a point at which the
        objective function had its minimum value. (See `Note 1` for
        which point is returned.)
    fval : float
        Function value at the point `x0`. (Returned when `full_output` is
        True.)
    grid : tuple
        Representation of the evaluation grid. It has the same
        length as `x0`. (Returned when `full_output` is True.)
    Jout : ndarray
        Function values at each point of the evaluation
        grid, i.e., ``Jout = func(*grid)``. (Returned
        when `full_output` is True.)
    See Also
    --------
    basinhopping, differential_evolution
    Notes
    -----
    *Note 1*: The program finds the gridpoint at which the lowest value
    of the objective function occurs. If `finish` is None, that is the
    point returned. When the global minimum occurs within (or not very far
    outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the global minimum.
    However, users often employ some other optimization program to
    "polish" the gridpoint values, i.e., to seek a more precise
    (local) minimum near `brute's` best gridpoint.
    The `brute` function's `finish` option provides a convenient way to do
    that. Any polishing program used must take `brute's` output as its
    initial guess as a positional argument, and take `brute's` input values
    for `args` as keyword arguments, otherwise an error will be raised.
    It may additionally take `full_output` and/or `disp` as keyword arguments.
    `brute` assumes that the `finish` function returns either an
    `OptimizeResult` object or a tuple in the form:
    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
    value of the argument, ``Jmin`` is the minimum value of the objective
    function, "..." may be some other returned values (which are not used
    by `brute`), and ``statuscode`` is the status code of the `finish` program.
    Note that when `finish` is not None, the values returned are those
    of the `finish` program, *not* the gridpoint ones. Consequently,
    while `brute` confines its search to the input grid points,
    the `finish` program's results usually will not coincide with any
    gridpoint, and may fall outside the grid's boundary. Thus, if a
    minimum only needs to be found over the provided grid points, make
    sure to pass in ``finish=None``.
    *Note 2*: The grid of points is a `numpy.mgrid` object.
    For `brute` the `ranges` and `Ns` inputs have the following effect.
    Each component of the `ranges` tuple can be either a slice object or a
    two-tuple giving a range of values, such as (0, 5). If the component is a
    slice object, `brute` uses it directly. If the component is a two-tuple
    range, `brute` internally converts it to a slice object that interpolates
    `Ns` points from its low-value to its high-value, inclusive.
    Examples
    --------
    We illustrate the use of `brute` to seek the global minimum of a function
    of two variables that is given as the sum of a positive-definite
    quadratic and two deep "Gaussian-shaped" craters. Specifically, define
    the objective function `f` as the sum of three other functions,
    ``f = f1 + f2 + f3``. We suppose each of these has a signature
    ``(z, *params)``, where ``z = (x, y)``,  and ``params`` and the functions
    are as defined below.
    >>> import numpy as np
    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
    >>> def f1(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
    >>> def f2(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
    >>> def f3(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
    >>> def f(z, *params):
    ...     return f1(z, *params) + f2(z, *params) + f3(z, *params)
    Thus, the objective function may have local minima near the minimum
    of each of the three functions of which it is composed. To
    use `fmin` to polish its gridpoint result, we may then continue as
    follows:
    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
    >>> from scipy import optimize
    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
    ...                           finish=optimize.fmin)
    >>> resbrute[0]  # global minimum
    array([-1.05665192,  1.80834843])
    >>> resbrute[1]  # function value at global minimum
    -3.4085818767
    Note that if `finish` had been set to None, we would have gotten the
    gridpoint [-1.0 1.75] where the rounded function value is -2.892.
    """
    N = len(ranges)
    if N > 40:
        raise ValueError("Brute Force not possible with more "
                         "than 40 variables.")
    lrange = list(ranges)
    for k in range(N):
        if not isinstance(lrange[k], slice):
            if len(lrange[k]) < 3:
                # A (low, high) two-tuple: append a complex step so that
                # np.mgrid interpolates `Ns` points inclusively.
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N == 1):
        lrange = lrange[0]
    grid = np.mgrid[lrange]
    # obtain an array of parameters that is iterable by a map-like callable
    inpt_shape = grid.shape
    if (N > 1):
        grid = np.reshape(grid, (inpt_shape[0], np.prod(inpt_shape[1:]))).T
    if not np.iterable(args):
        args = (args,)
    wrapped_func = _Brute_Wrapper(func, args)
    # iterate over input arrays, possibly in parallel
    with MapWrapper(pool=workers) as mapper:
        Jout = np.array(list(mapper(wrapped_func, grid)))
        if (N == 1):
            grid = (grid,)
            Jout = np.squeeze(Jout)
        elif (N > 1):
            # Restore the original grid geometry now that evaluation is done.
            Jout = np.reshape(Jout, inpt_shape[1:])
            grid = np.reshape(grid.T, inpt_shape)
    Nshape = shape(Jout)
    # Unravel the flat argmin index back into per-axis grid indices
    # (manual equivalent of np.unravel_index).
    indx = argmin(Jout.ravel(), axis=-1)
    Nindx = np.empty(N, int)
    xmin = np.empty(N, float)
    for k in range(N - 1, -1, -1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx // thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]
    Jmin = Jout[tuple(Nindx)]
    if (N == 1):
        grid = grid[0]
        xmin = xmin[0]
    if callable(finish):
        # set up kwargs for `finish` function
        finish_args = _getfullargspec(finish).args
        finish_kwargs = dict()
        if 'full_output' in finish_args:
            finish_kwargs['full_output'] = 1
        if 'disp' in finish_args:
            finish_kwargs['disp'] = disp
        elif 'options' in finish_args:
            # pass 'disp' as `options`
            # (e.g., if `finish` is `minimize`)
            finish_kwargs['options'] = {'disp': disp}
        # run minimizer
        res = finish(func, xmin, args=args, **finish_kwargs)
        if isinstance(res, OptimizeResult):
            xmin = res.x
            Jmin = res.fun
            success = res.success
        else:
            # Assume an fmin-style tuple: (xmin, Jmin, ..., statuscode).
            xmin = res[0]
            Jmin = res[1]
            success = res[-1] == 0
        if not success:
            if disp:
                warnings.warn("Either final optimization did not succeed or `finish` "
                              "does not return `statuscode` as its last argument.",
                              RuntimeWarning, stacklevel=2)
    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin
| BracketError |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/struct_store/sql_query.py | {
"start": 20899,
"end": 23030
} | class ____(BaseSQLTableQueryEngine):
"""
PGvector SQL query engine.
A modified version of the normal text-to-SQL query engine because
we can infer embedding vectors in the sql query.
NOTE: this is a beta feature
NOTE: Any Text-to-SQL application should be aware that executing
arbitrary SQL queries can be a security risk. It is recommended to
take precautions as needed, such as using restricted roles, read-only
databases, sandboxing, etc.
"""
def __init__(
self,
sql_database: SQLDatabase,
llm: Optional[LLM] = None,
text_to_sql_prompt: Optional[BasePromptTemplate] = None,
context_query_kwargs: Optional[dict] = None,
synthesize_response: bool = True,
response_synthesis_prompt: Optional[BasePromptTemplate] = None,
refine_synthesis_prompt: Optional[BasePromptTemplate] = None,
tables: Optional[Union[List[str], List[Table]]] = None,
context_str_prefix: Optional[str] = None,
sql_only: bool = False,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
text_to_sql_prompt = text_to_sql_prompt or DEFAULT_TEXT_TO_SQL_PGVECTOR_PROMPT
self._sql_retriever = NLSQLRetriever(
sql_database,
llm=llm,
text_to_sql_prompt=text_to_sql_prompt,
context_query_kwargs=context_query_kwargs,
tables=tables,
sql_parser_mode=SQLParserMode.PGVECTOR,
context_str_prefix=context_str_prefix,
sql_only=sql_only,
callback_manager=callback_manager,
)
super().__init__(
synthesize_response=synthesize_response,
response_synthesis_prompt=response_synthesis_prompt,
refine_synthesis_prompt=refine_synthesis_prompt,
llm=llm,
callback_manager=callback_manager,
**kwargs,
)
@property
def sql_retriever(self) -> NLSQLRetriever:
"""Get SQL retriever."""
return self._sql_retriever
| PGVectorSQLQueryEngine |
python | Textualize__textual | src/textual/widgets/_text_area.py | {
"start": 3247,
"end": 100199
} | class ____(ScrollView):
DEFAULT_CSS = """\
TextArea {
width: 1fr;
height: 1fr;
border: tall $border-blurred;
padding: 0 1;
color: $foreground;
background: $surface;
&.-textual-compact {
border: none !important;
}
& .text-area--cursor {
text-style: $input-cursor-text-style;
}
& .text-area--gutter {
color: $foreground 40%;
}
& .text-area--cursor-gutter {
color: $foreground 60%;
background: $boost;
text-style: bold;
}
& .text-area--cursor-line {
background: $boost;
}
& .text-area--selection {
background: $input-selection-background;
}
& .text-area--matching-bracket {
background: $foreground 30%;
}
& .text-area--suggestion {
color: $text-muted;
}
& .text-area--placeholder {
color: $text 40%;
}
&:focus {
border: tall $border;
}
&:ansi {
& .text-area--selection {
background: transparent;
text-style: reverse;
}
}
&:dark {
.text-area--cursor {
color: $input-cursor-foreground;
background: $input-cursor-background;
}
&.-read-only .text-area--cursor {
background: $warning-darken-1;
}
}
&:light {
.text-area--cursor {
color: $text 90%;
background: $foreground 70%;
}
&.-read-only .text-area--cursor {
background: $warning-darken-1;
}
}
}
"""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"text-area--cursor",
"text-area--gutter",
"text-area--cursor-gutter",
"text-area--cursor-line",
"text-area--selection",
"text-area--matching-bracket",
"text-area--suggestion",
"text-area--placeholder",
}
"""
`TextArea` offers some component classes which can be used to style aspects of the widget.
Note that any attributes provided in the chosen `TextAreaTheme` will take priority here.
| Class | Description |
| :- | :- |
| `text-area--cursor` | Target the cursor. |
| `text-area--gutter` | Target the gutter (line number column). |
| `text-area--cursor-gutter` | Target the gutter area of the line the cursor is on. |
| `text-area--cursor-line` | Target the line the cursor is on. |
| `text-area--selection` | Target the current selection. |
| `text-area--matching-bracket` | Target matching brackets. |
| `text-area--suggestion` | Target the text set in the `suggestion` reactive. |
| `text-area--placeholder` | Target the placeholder text. |
"""
BINDINGS = [
# Cursor movement
Binding("up", "cursor_up", "Cursor up", show=False),
Binding("down", "cursor_down", "Cursor down", show=False),
Binding("left", "cursor_left", "Cursor left", show=False),
Binding("right", "cursor_right", "Cursor right", show=False),
Binding("ctrl+left", "cursor_word_left", "Cursor word left", show=False),
Binding("ctrl+right", "cursor_word_right", "Cursor word right", show=False),
Binding("home,ctrl+a", "cursor_line_start", "Cursor line start", show=False),
Binding("end,ctrl+e", "cursor_line_end", "Cursor line end", show=False),
Binding("pageup", "cursor_page_up", "Cursor page up", show=False),
Binding("pagedown", "cursor_page_down", "Cursor page down", show=False),
# Making selections (generally holding the shift key and moving cursor)
Binding(
"ctrl+shift+left",
"cursor_word_left(True)",
"Cursor left word select",
show=False,
),
Binding(
"ctrl+shift+right",
"cursor_word_right(True)",
"Cursor right word select",
show=False,
),
Binding(
"shift+home",
"cursor_line_start(True)",
"Cursor line start select",
show=False,
),
Binding(
"shift+end", "cursor_line_end(True)", "Cursor line end select", show=False
),
Binding("shift+up", "cursor_up(True)", "Cursor up select", show=False),
Binding("shift+down", "cursor_down(True)", "Cursor down select", show=False),
Binding("shift+left", "cursor_left(True)", "Cursor left select", show=False),
Binding("shift+right", "cursor_right(True)", "Cursor right select", show=False),
# Shortcut ways of making selections
# Binding("f5", "select_word", "select word", show=False),
Binding("f6", "select_line", "Select line", show=False),
Binding("f7", "select_all", "Select all", show=False),
# Deletion
Binding("backspace", "delete_left", "Delete character left", show=False),
Binding(
"ctrl+w", "delete_word_left", "Delete left to start of word", show=False
),
Binding("delete,ctrl+d", "delete_right", "Delete character right", show=False),
Binding(
"ctrl+f", "delete_word_right", "Delete right to start of word", show=False
),
Binding("ctrl+x", "cut", "Cut", show=False),
Binding("ctrl+c", "copy", "Copy", show=False),
Binding("ctrl+v", "paste", "Paste", show=False),
Binding(
"ctrl+u", "delete_to_start_of_line", "Delete to line start", show=False
),
Binding(
"ctrl+k",
"delete_to_end_of_line_or_delete_line",
"Delete to line end",
show=False,
),
Binding(
"ctrl+shift+k",
"delete_line",
"Delete line",
show=False,
),
Binding("ctrl+z", "undo", "Undo", show=False),
Binding("ctrl+y", "redo", "Redo", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| up | Move the cursor up. |
| down | Move the cursor down. |
| left | Move the cursor left. |
| ctrl+left | Move the cursor to the start of the word. |
| ctrl+shift+left | Move the cursor to the start of the word and select. |
| right | Move the cursor right. |
| ctrl+right | Move the cursor to the end of the word. |
| ctrl+shift+right | Move the cursor to the end of the word and select. |
| home,ctrl+a | Move the cursor to the start of the line. |
| end,ctrl+e | Move the cursor to the end of the line. |
| shift+home | Move the cursor to the start of the line and select. |
| shift+end | Move the cursor to the end of the line and select. |
| pageup | Move the cursor one page up. |
| pagedown | Move the cursor one page down. |
| shift+up | Select while moving the cursor up. |
| shift+down | Select while moving the cursor down. |
| shift+left | Select while moving the cursor left. |
| shift+right | Select while moving the cursor right. |
| backspace | Delete character to the left of cursor. |
| ctrl+w | Delete from cursor to start of the word. |
| delete,ctrl+d | Delete character to the right of cursor. |
| ctrl+f | Delete from cursor to end of the word. |
| ctrl+shift+k | Delete the current line. |
| ctrl+u | Delete from cursor to the start of the line. |
| ctrl+k | Delete from cursor to the end of the line. |
| f6 | Select the current line. |
| f7 | Select all text in the document. |
| ctrl+z | Undo. |
| ctrl+y | Redo. |
| ctrl+x | Cut selection or line if no selection. |
| ctrl+c | Copy selection to clipboard. |
| ctrl+v | Paste from clipboard. |
"""
language: Reactive[str | None] = reactive(None, always_update=True, init=False)
"""The language to use.
This must be set to a valid, non-None value for syntax highlighting to work.
If the value is a string, a built-in language parser will be used if available.
If you wish to use an unsupported language, you'll have to register
it first using [`TextArea.register_language`][textual.widgets._text_area.TextArea.register_language].
"""
theme: Reactive[str] = reactive("css", always_update=True, init=False)
"""The name of the theme to use.
Themes must be registered using [`TextArea.register_theme`][textual.widgets._text_area.TextArea.register_theme] before they can be used.
Syntax highlighting is only possible when the `language` attribute is set.
"""
selection: Reactive[Selection] = reactive(
Selection(), init=False, always_update=True
)
"""The selection start and end locations (zero-based line_index, offset).
This represents the cursor location and the current selection.
The `Selection.end` always refers to the cursor location.
If no text is selected, then `Selection.end == Selection.start` is True.
The text selected in the document is available via the `TextArea.selected_text` property.
"""
show_line_numbers: Reactive[bool] = reactive(False, init=False)
"""True to show the line number column on the left edge, otherwise False.
Changing this value will immediately re-render the `TextArea`."""
line_number_start: Reactive[int] = reactive(1, init=False)
"""The line number the first line should be."""
indent_width: Reactive[int] = reactive(4, init=False)
"""The width of tabs or the multiple of spaces to align to on pressing the `tab` key.
If the document currently open contains tabs that are currently visible on screen,
altering this value will immediately change the display width of the visible tabs.
"""
match_cursor_bracket: Reactive[bool] = reactive(True, init=False)
"""If the cursor is at a bracket, highlight the matching bracket (if found)."""
cursor_blink: Reactive[bool] = reactive(True, init=False)
"""True if the cursor should blink."""
soft_wrap: Reactive[bool] = reactive(True, init=False)
"""True if text should soft wrap."""
read_only: Reactive[bool] = reactive(False)
"""True if the content is read-only.
Read-only means end users cannot insert, delete or replace content.
The document can still be edited programmatically via the API.
"""
show_cursor: Reactive[bool] = reactive(True)
"""Show the cursor in read only mode?
If `True`, the cursor will be visible when `read_only==True`.
If `False`, the cursor will be hidden when `read_only==True`, and the TextArea will
scroll like other containers.
"""
compact: reactive[bool] = reactive(False, toggle_class="-textual-compact")
"""Enable compact display?"""
highlight_cursor_line: reactive[bool] = reactive(True)
"""Highlight the line under the cursor?"""
_cursor_visible: Reactive[bool] = reactive(False, repaint=False, init=False)
"""Indicates where the cursor is in the blink cycle. If it's currently
not visible due to blinking, this is False."""
suggestion: Reactive[str] = reactive("")
"""A suggestion for auto-complete (pressing right will insert it)."""
hide_suggestion_on_blur: Reactive[bool] = reactive(True)
"""Hide suggestion when the TextArea does not have focus."""
placeholder: Reactive[str | Content] = reactive("")
"""Text to show when the text area has no content."""
@dataclass
class Changed(Message):
"""Posted when the content inside the TextArea changes.
Handle this message using the `on` decorator - `@on(TextArea.Changed)`
or a method named `on_text_area_changed`.
"""
text_area: TextArea
"""The `text_area` that sent this message."""
@property
def control(self) -> TextArea:
"""The `TextArea` that sent this message."""
return self.text_area
@dataclass
class SelectionChanged(Message):
"""Posted when the selection changes.
This includes when the cursor moves or when text is selected."""
selection: Selection
"""The new selection."""
text_area: TextArea
"""The `text_area` that sent this message."""
@property
def control(self) -> TextArea:
return self.text_area
def __init__(
self,
text: str = "",
*,
language: str | None = None,
theme: str = "css",
soft_wrap: bool = True,
tab_behavior: Literal["focus", "indent"] = "focus",
read_only: bool = False,
show_cursor: bool = True,
show_line_numbers: bool = False,
line_number_start: int = 1,
max_checkpoints: int = 50,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
tooltip: RenderableType | None = None,
compact: bool = False,
highlight_cursor_line: bool = True,
placeholder: str | Content = "",
) -> None:
"""Construct a new `TextArea`.
Args:
text: The initial text to load into the TextArea.
language: The language to use.
theme: The theme to use.
soft_wrap: Enable soft wrapping.
tab_behavior: If 'focus', pressing tab will switch focus. If 'indent', pressing tab will insert a tab.
read_only: Enable read-only mode. This prevents edits using the keyboard.
show_cursor: Show the cursor in read only mode (no effect otherwise).
show_line_numbers: Show line numbers on the left edge.
line_number_start: What line number to start on.
max_checkpoints: The maximum number of undo history checkpoints to retain.
name: The name of the `TextArea` widget.
id: The ID of the widget, used to refer to it from Textual CSS.
classes: One or more Textual CSS compatible class names separated by spaces.
disabled: True if the widget is disabled.
tooltip: Optional tooltip.
compact: Enable compact style (without borders).
highlight_cursor_line: Highlight the line under the cursor.
placeholder: Text to display when there is not content.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self._languages: dict[str, TextAreaLanguage] = {}
"""Maps language names to TextAreaLanguage. This is only used for languages
registered by end-users using `TextArea.register_language`. If a user attempts
to set `TextArea.language` to a language that is not registered here, we'll
attempt to get it from the environment. If that fails, we'll fall back to
plain text.
"""
self._themes: dict[str, TextAreaTheme] = {}
"""Maps theme names to TextAreaTheme."""
self.indent_type: Literal["tabs", "spaces"] = "spaces"
"""Whether to indent using tabs or spaces."""
self._word_pattern = re.compile(r"(?<=\W)(?=\w)|(?<=\w)(?=\W)")
"""Compiled regular expression for what we consider to be a 'word'."""
self.history: EditHistory = EditHistory(
max_checkpoints=max_checkpoints,
checkpoint_timer=2.0,
checkpoint_max_characters=100,
)
"""A stack (the end of the list is the top of the stack) for tracking edits."""
self._selecting = False
"""True if we're currently selecting text using the mouse, otherwise False."""
self._matching_bracket_location: Location | None = None
"""The location (row, column) of the bracket which matches the bracket the
cursor is currently at. If the cursor is at a bracket, or there's no matching
bracket, this will be `None`."""
self._highlights: dict[int, list[Highlight]] = defaultdict(list)
"""Mapping line numbers to the set of highlights for that line."""
self._highlight_query: "Query | None" = None
"""The query that's currently being used for highlighting."""
self.document: DocumentBase = Document(text)
"""The document this widget is currently editing."""
self.wrapped_document: WrappedDocument = WrappedDocument(self.document)
"""The wrapped view of the document."""
self.navigator: DocumentNavigator = DocumentNavigator(self.wrapped_document)
"""Queried to determine where the cursor should move given a navigation
action, accounting for wrapping etc."""
self._cursor_offset = (0, 0)
"""The virtual offset of the cursor (not screen-space offset)."""
self.set_reactive(TextArea.soft_wrap, soft_wrap)
self.set_reactive(TextArea.read_only, read_only)
self.set_reactive(TextArea.show_cursor, show_cursor)
self.set_reactive(TextArea.show_line_numbers, show_line_numbers)
self.set_reactive(TextArea.line_number_start, line_number_start)
self.set_reactive(TextArea.highlight_cursor_line, highlight_cursor_line)
self.set_reactive(TextArea.placeholder, placeholder)
self._line_cache: LRUCache[tuple, Strip] = LRUCache(1024)
self._set_document(text, language)
self.language = language
self.theme = theme
self._theme: TextAreaTheme
"""The `TextAreaTheme` corresponding to the set theme name. When the `theme`
reactive is set as a string, the watcher will update this attribute to the
corresponding `TextAreaTheme` object."""
self.tab_behavior = tab_behavior
if tooltip is not None:
self.tooltip = tooltip
self.compact = compact
@classmethod
def code_editor(
cls,
text: str = "",
*,
language: str | None = None,
theme: str = "monokai",
soft_wrap: bool = False,
tab_behavior: Literal["focus", "indent"] = "indent",
read_only: bool = False,
show_cursor: bool = True,
show_line_numbers: bool = True,
line_number_start: int = 1,
max_checkpoints: int = 50,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
tooltip: RenderableType | None = None,
compact: bool = False,
highlight_cursor_line: bool = True,
placeholder: str | Content = "",
) -> TextArea:
"""Construct a new `TextArea` with sensible defaults for editing code.
This instantiates a `TextArea` with line numbers enabled, soft wrapping
disabled, "indent" tab behavior, and the "monokai" theme.
Args:
text: The initial text to load into the TextArea.
language: The language to use.
theme: The theme to use.
soft_wrap: Enable soft wrapping.
tab_behavior: If 'focus', pressing tab will switch focus. If 'indent', pressing tab will insert a tab.
read_only: Enable read-only mode. This prevents edits using the keyboard.
show_cursor: Show the cursor in read only mode (no effect otherwise).
show_line_numbers: Show line numbers on the left edge.
line_number_start: What line number to start on.
name: The name of the `TextArea` widget.
id: The ID of the widget, used to refer to it from Textual CSS.
classes: One or more Textual CSS compatible class names separated by spaces.
disabled: True if the widget is disabled.
tooltip: Optional tooltip
compact: Enable compact style (without borders).
highlight_cursor_line: Highlight the line under the cursor.
"""
return cls(
text,
language=language,
theme=theme,
soft_wrap=soft_wrap,
tab_behavior=tab_behavior,
read_only=read_only,
show_cursor=show_cursor,
show_line_numbers=show_line_numbers,
line_number_start=line_number_start,
max_checkpoints=max_checkpoints,
name=name,
id=id,
classes=classes,
disabled=disabled,
tooltip=tooltip,
compact=compact,
highlight_cursor_line=highlight_cursor_line,
placeholder=placeholder,
)
@staticmethod
def _get_builtin_highlight_query(language_name: str) -> str:
"""Get the highlight query for a builtin language.
Args:
language_name: The name of the builtin language.
Returns:
The highlight query.
"""
try:
highlight_query_path = (
Path(_HIGHLIGHTS_PATH.resolve()) / f"{language_name}.scm"
)
highlight_query = highlight_query_path.read_text()
except OSError as error:
log.warning(f"Unable to load highlight query. {error}")
highlight_query = ""
return highlight_query
    def notify_style_update(self) -> None:
        """Invalidate cached rendered lines when styles change."""
        # Rendered strips bake in style information, so they are stale now.
        self._line_cache.clear()
        super().notify_style_update()
    def update_suggestion(self) -> None:
        """A hook to update the [`suggestion`][textual.widgets.TextArea.suggestion] attribute."""
        # Intentionally a no-op here; subclasses may override to compute completions.
def check_consume_key(self, key: str, character: str | None = None) -> bool:
"""Check if the widget may consume the given key.
As a textarea we are expecting to capture printable keys.
Args:
key: A key identifier.
character: A character associated with the key, or `None` if there isn't one.
Returns:
`True` if the widget may capture the key in its `Key` message, or `False` if it won't.
"""
if self.read_only:
# In read only mode we don't consume any key events
return False
if self.tab_behavior == "indent" and key == "tab":
# If tab_behavior is indent, then we consume the tab
return True
# Otherwise we capture all printable keys
return character is not None and character.isprintable()
def _build_highlight_map(self) -> None:
"""Query the tree for ranges to highlights, and update the internal highlights mapping."""
self._line_cache.clear()
highlights = self._highlights
highlights.clear()
if not self._highlight_query:
return
captures = self.document.query_syntax_tree(self._highlight_query)
for highlight_name, nodes in captures.items():
for node in nodes:
node_start_row, node_start_column = node.start_point
node_end_row, node_end_column = node.end_point
if node_start_row == node_end_row:
highlight = (node_start_column, node_end_column, highlight_name)
highlights[node_start_row].append(highlight)
else:
# Add the first line of the node range
highlights[node_start_row].append(
(node_start_column, None, highlight_name)
)
# Add the middle lines - entire row of this node is highlighted
for node_row in range(node_start_row + 1, node_end_row):
highlights[node_row].append((0, None, highlight_name))
# Add the last line of the node range
highlights[node_end_row].append(
(0, node_end_column, highlight_name)
)
    def _watch_has_focus(self, focus: bool) -> None:
        # Toggle cursor blinking and sync the terminal cursor on focus change.
        self._cursor_visible = focus
        if focus:
            self._restart_blink()
            self.app.cursor_position = self.cursor_screen_offset
            # Gaining focus starts a fresh undo batch.
            self.history.checkpoint()
        else:
            self._pause_blink(visible=False)
    def _watch_selection(
        self, previous_selection: Selection, selection: Selection
    ) -> None:
        """When the cursor moves, scroll it into view."""
        # Find the visual offset of the cursor in the document
        if not self.is_mounted:
            return
        self.app.clear_selection()
        cursor_location = selection.end
        self.scroll_cursor_visible()
        cursor_row, cursor_column = cursor_location
        # The cursor may rest one past the end of a line, hence the guard.
        try:
            character = self.document[cursor_row][cursor_column]
        except IndexError:
            character = ""
        # Record the location of a matching closing/opening bracket.
        match_location = self.find_matching_bracket(character, cursor_location)
        self._matching_bracket_location = match_location
        if match_location is not None:
            # Repaint the cursor's row so bracket highlighting updates.
            _, offset_y = self._cursor_offset
            self.refresh_lines(offset_y)
        self.app.cursor_position = self.cursor_screen_offset
        if previous_selection != selection:
            self.post_message(self.SelectionChanged(selection, self))
    def _watch_cursor_blink(self, blink: bool) -> None:
        """Start or stop cursor blinking when the `cursor_blink` reactive changes."""
        if not self.is_mounted:
            return None
        if blink and self.has_focus:
            self._restart_blink()
        else:
            # Keep the cursor visible (not blinking) while focused.
            self._pause_blink(visible=self.has_focus)
    def _watch_read_only(self, read_only: bool) -> None:
        # Reflect read-only state in CSS and re-apply the theme (cursor styling differs).
        self.set_class(read_only, "-read-only")
        self._set_theme(self._theme.name)
    def _recompute_cursor_offset(self):
        """Recompute the (x, y) coordinate of the cursor in the wrapped document."""
        # Offsets are in wrapped-document space, not screen space.
        self._cursor_offset = self.wrapped_document.location_to_offset(
            self.cursor_location
        )
    def find_matching_bracket(
        self, bracket: str, search_from: Location
    ) -> Location | None:
        """If the character is a bracket, find the matching bracket.

        Args:
            bracket: The character we're searching for the matching bracket of.
            search_from: The location to start the search.

        Returns:
            The `Location` of the matching bracket, or `None` if it's not found.
            If the character is not available for bracket matching, `None` is returned.
        """
        match_location = None
        # Stack of intervening brackets; the match is found when it empties.
        bracket_stack: list[str] = []
        if bracket in _OPENING_BRACKETS:
            # Search forwards for a closing bracket
            for candidate, candidate_location in self._yield_character_locations(
                search_from
            ):
                if candidate in _OPENING_BRACKETS:
                    bracket_stack.append(candidate)
                elif candidate in _CLOSING_BRACKETS:
                    # Only pop when the closer pairs with the most recent opener.
                    if (
                        bracket_stack
                        and bracket_stack[-1] == _CLOSING_BRACKETS[candidate]
                    ):
                        bracket_stack.pop()
                        if not bracket_stack:
                            match_location = candidate_location
                            break
        elif bracket in _CLOSING_BRACKETS:
            # Search backwards for an opening bracket
            for (
                candidate,
                candidate_location,
            ) in self._yield_character_locations_reverse(search_from):
                if candidate in _CLOSING_BRACKETS:
                    bracket_stack.append(candidate)
                elif candidate in _OPENING_BRACKETS:
                    # Mirror of the forward case: pop on a pairing opener.
                    if (
                        bracket_stack
                        and bracket_stack[-1] == _OPENING_BRACKETS[candidate]
                    ):
                        bracket_stack.pop()
                        if not bracket_stack:
                            match_location = candidate_location
                            break
        return match_location
def _validate_selection(self, selection: Selection) -> Selection:
"""Clamp the selection to valid locations."""
start, end = selection
clamp_visitable = self.clamp_visitable
return Selection(clamp_visitable(start), clamp_visitable(end))
    def _watch_language(self, language: str | None) -> None:
        """When the language is updated, update the type of document."""
        # Rebuilding the document switches between plain and syntax-aware parsing.
        self._set_document(self.document.text, language)
    def _watch_show_line_numbers(self) -> None:
        """The line number gutter contributes to virtual size, so recalculate."""
        self._rewrap_and_refresh_virtual_size()
        self.scroll_cursor_visible()
    def _watch_line_number_start(self) -> None:
        """The line number gutter max size might change and contributes to virtual size, so recalculate."""
        self._rewrap_and_refresh_virtual_size()
        self.scroll_cursor_visible()
    def _watch_indent_width(self) -> None:
        """Changing width of tabs will change the document display width."""
        self._rewrap_and_refresh_virtual_size()
        self.scroll_cursor_visible()
    def _watch_show_vertical_scrollbar(self) -> None:
        # The scrollbar takes horizontal space, which changes the wrap width.
        if self.wrap_width:
            self._rewrap_and_refresh_virtual_size()
        self.scroll_cursor_visible()
    def _watch_theme(self, theme: str) -> None:
        """We set the styles on this widget when the theme changes, to ensure that
        if padding is applied, the colors match."""
        self._set_theme(theme)
    def _app_theme_changed(self) -> None:
        # Re-resolve the current TextArea theme when the app-level theme changes.
        self._set_theme(self._theme.name)
    def _set_theme(self, theme: str) -> None:
        """Resolve a theme name and apply its colors to this widget.

        Raises:
            ThemeDoesNotExist: If the name matches neither a registered nor a
                builtin theme.
        """
        theme_object: TextAreaTheme | None
        # If the user supplied a string theme name, find it and apply it.
        # User-registered themes take priority over builtins.
        try:
            theme_object = self._themes[theme]
        except KeyError:
            theme_object = TextAreaTheme.get_builtin_theme(theme)
            if theme_object is None:
                raise ThemeDoesNotExist(
                    f"{theme!r} is not a builtin theme, or it has not been registered. "
                    f"To use a custom theme, register it first using `register_theme`, "
                    f"then switch to that theme by setting the `TextArea.theme` attribute."
                ) from None
        # Copy the theme so later mutations don't affect the registered original.
        self._theme = dataclasses.replace(theme_object)
        if theme_object:
            base_style = theme_object.base_style
            if base_style:
                color = base_style.color
                background = base_style.bgcolor
                if color:
                    self.styles.color = Color.from_rich_color(color)
                if background:
                    self.styles.background = Color.from_rich_color(background)
            else:
                # When the theme doesn't define a base style (e.g. the `css` theme),
                # the TextArea background/color should fallback to its CSS colors.
                #
                # Since these styles may have already been changed by another theme,
                # we need to reset the background/color styles to the default values.
                self.styles.color = None
                self.styles.background = None
@property
def available_themes(self) -> set[str]:
"""A list of the names of the themes available to the `TextArea`.
The values in this list can be assigned `theme` reactive attribute of
`TextArea`.
You can retrieve the full specification for a theme by passing one of
the strings from this list into `TextAreaTheme.get_by_name(theme_name: str)`.
Alternatively, you can directly retrieve a list of `TextAreaTheme` objects
(which contain the full theme specification) by calling
`TextAreaTheme.builtin_themes()`.
"""
return {
theme.name for theme in TextAreaTheme.builtin_themes()
} | self._themes.keys()
    def register_theme(self, theme: TextAreaTheme) -> None:
        """Register a theme for use by the `TextArea`.

        After registering a theme, you can set themes by assigning the theme
        name to the `TextArea.theme` reactive attribute. For example
        `text_area.theme = "my_custom_theme"` where `"my_custom_theme"` is the
        name of the theme you registered.

        If you supply a theme with a name that already exists that theme
        will be overwritten.
        """
        # Registration is per-instance; keyed by the theme's own name.
        self._themes[theme.name] = theme
@property
def available_languages(self) -> set[str]:
"""A set of the names of languages available to the `TextArea`.
The values in this set can be assigned to the `language` reactive attribute
of `TextArea`.
The returned set contains the builtin languages installed with the syntax extras,
plus those registered via the `register_language` method.
"""
return set(BUILTIN_LANGUAGES) | self._languages.keys()
    def register_language(
        self,
        name: str,
        language: "Language",
        highlight_query: str,
    ) -> None:
        """Register a language and corresponding highlight query.

        Calling this method does not change the language of the `TextArea`.
        On switching to this language (via the `language` reactive attribute),
        syntax highlighting will be performed using the given highlight query.

        If a string `name` is supplied for a builtin supported language, then
        this method will update the default highlight query for that language.

        Registering a language only registers it to this instance of `TextArea`.

        Args:
            name: The name of the language.
            language: A tree-sitter `Language` object.
            highlight_query: The highlight query to use for syntax highlighting this language.
        """
        # A later registration with the same name silently replaces the old one.
        self._languages[name] = TextAreaLanguage(name, language, highlight_query)
def update_highlight_query(self, name: str, highlight_query: str) -> None:
"""Update the highlight query for an already registered language.
Args:
name: The name of the language.
highlight_query: The highlight query to use for syntax highlighting this language.
"""
if name not in self._languages:
self._languages[name] = TextAreaLanguage(name, None, highlight_query)
else:
self._languages[name].highlight_query = highlight_query
# If this is the currently loaded language, reload the document because
# it could be a different highlight query for the same language.
if name == self.language:
self._set_document(self.text, name)
    def _set_document(self, text: str, language: str | None) -> None:
        """Construct and return an appropriate document.

        Args:
            text: The text of the document.
            language: The name of the language to use. This must correspond to a tree-sitter
                language available in the current environment (e.g. use `python` for `tree-sitter-python`).
                If None, the document will be treated as plain text.

        Raises:
            LanguageDoesNotExist: If tree-sitter is available but the language
                cannot be resolved as builtin or user-registered.
        """
        self._highlight_query = None
        if TREE_SITTER and language:
            if language in self._languages:
                # User-registered languages take priority.
                highlight_query = self._languages[language].highlight_query
                document_language = self._languages[language].language
                if document_language is None:
                    # Registered without a Language object: resolve a builtin one.
                    document_language = get_language(language)
            else:
                # No user-registered language, so attempt to use a built-in language.
                highlight_query = self._get_builtin_highlight_query(language)
                document_language = get_language(language)
            # No built-in language, and no user-registered language: use plain text and warn.
            if document_language is None:
                raise LanguageDoesNotExist(
                    f"tree-sitter is available, but no built-in or user-registered language called {language!r}.\n"
                    f"Ensure the language is installed (e.g. `pip install tree-sitter-ruby`)\n"
                    f"Falling back to plain text."
                )
            else:
                document: DocumentBase
                # Parsing may fail (e.g. incompatible parser); fall back to plain text.
                try:
                    document = SyntaxAwareDocument(text, document_language)
                except SyntaxAwareDocumentError:
                    document = Document(text)
                    log.warning(
                        f"Parser not found for language {document_language!r}. Parsing disabled."
                    )
                else:
                    self._highlight_query = document.prepare_query(highlight_query)
        elif language and not TREE_SITTER:
            # User has supplied a language i.e. `TextArea(language="python")`, but they
            # don't have tree-sitter available in the environment. We fallback to plain text.
            log.warning(
                "tree-sitter not available in this environment. Parsing disabled.\n"
                "You may need to install the `syntax` extras alongside textual.\n"
                "Try `pip install 'textual[syntax]'` or '`poetry add textual[syntax]' to get started quickly.\n\n"
                "Alternatively, install tree-sitter manually (`pip install tree-sitter`) and then\n"
                "install the required language (e.g. `pip install tree-sitter-ruby`), then register it.\n"
                "and its highlight query using TextArea.register_language().\n\n"
                "Falling back to plain text for now."
            )
            document = Document(text)
        else:
            # tree-sitter is available, but the user has supplied None or "" for the language.
            # Use a regular plain-text document.
            document = Document(text)
        # Rebuild all document-derived state: wrapping, navigation, highlights.
        self.document = document
        self.wrapped_document = WrappedDocument(document, tab_width=self.indent_width)
        self.navigator = DocumentNavigator(self.wrapped_document)
        self._build_highlight_map()
        self.move_cursor((0, 0))
        self._rewrap_and_refresh_virtual_size()
@property
def _visible_line_indices(self) -> tuple[int, int]:
"""Return the visible line indices as a tuple (top, bottom).
Returns:
A tuple (top, bottom) indicating the top and bottom visible line indices.
"""
_, scroll_offset_y = self.scroll_offset
return scroll_offset_y, scroll_offset_y + self.size.height
    def _watch_scroll_x(self) -> None:
        # Keep the terminal cursor in sync with the on-screen cursor position.
        self.app.cursor_position = self.cursor_screen_offset
    def _watch_scroll_y(self) -> None:
        # Keep the terminal cursor in sync with the on-screen cursor position.
        self.app.cursor_position = self.cursor_screen_offset
    def load_text(self, text: str) -> None:
        """Load text into the TextArea.

        This will replace the text currently in the TextArea and clear the edit history.

        Args:
            text: The text to load into the TextArea.
        """
        # History is cleared first so the new document starts with a fresh undo stack.
        self.history.clear()
        self._set_document(text, self.language)
        self.post_message(self.Changed(self).set_sender(self))
        self.update_suggestion()
    def _on_resize(self) -> None:
        # A resize changes the available wrap width, so re-wrap the document.
        self._rewrap_and_refresh_virtual_size()
    def _watch_soft_wrap(self) -> None:
        # Re-wrap, then re-center the cursor once the refresh has happened.
        self._rewrap_and_refresh_virtual_size()
        self.call_after_refresh(self.scroll_cursor_visible, center=True)
@property
def wrap_width(self) -> int:
"""The width which gets used when the document wraps.
Accounts for gutter, scrollbars, etc.
"""
width, _ = self.scrollable_content_region.size
cursor_width = 1
if self.soft_wrap:
return max(0, width - self.gutter_width - cursor_width)
return 0
    def _rewrap_and_refresh_virtual_size(self) -> None:
        """Re-wrap the whole document and recompute the widget's virtual size."""
        self.wrapped_document.wrap(self.wrap_width, tab_width=self.indent_width)
        # Wrapping invalidates every cached rendered line.
        self._line_cache.clear()
        self._refresh_size()
    @property
    def is_syntax_aware(self) -> bool:
        """True if the TextArea is currently syntax aware - i.e. it's parsing document content."""
        return isinstance(self.document, SyntaxAwareDocument)
def _yield_character_locations(
self, start: Location
) -> Iterable[tuple[str, Location]]:
"""Yields character locations starting from the given location.
Does not yield location of line separator characters like `\\n`.
Args:
start: The location to start yielding from.
Returns:
Yields tuples of (character, (row, column)).
"""
row, column = start
document = self.document
line_count = document.line_count
while 0 <= row < line_count:
line = document[row]
while column < len(line):
yield line[column], (row, column)
column += 1
column = 0
row += 1
    def _yield_character_locations_reverse(
        self, start: Location
    ) -> Iterable[tuple[str, Location]]:
        """Yield (character, location) pairs walking backwards from `start`.

        A column of -1 (including after exhausting a row) means "resume from
        the end of the current row". Line separators are not yielded.
        """
        row, column = start
        document = self.document
        line_count = document.line_count
        while line_count > row >= 0:
            line = document[row]
            if column == -1:
                # Start from the last character of this row.
                column = len(line) - 1
            while column >= 0:
                yield line[column], (row, column)
                column -= 1
            # Inner loop leaves column at -1, so the previous row starts at its end.
            row -= 1
    def _refresh_size(self) -> None:
        """Update the virtual size of the TextArea."""
        if self.soft_wrap:
            # Width is 0 when wrapping: content never scrolls horizontally.
            self.virtual_size = Size(0, self.wrapped_document.height)
        else:
            # +1 width to make space for the cursor resting at the end of the line
            width, height = self.document.get_size(self.indent_width)
            self.virtual_size = Size(width + self.gutter_width + 1, height)
        self._refresh_scrollbars()
@property
def _draw_cursor(self) -> bool:
"""Draw the cursor?"""
if self.read_only:
# If we are in read only mode, we don't want the cursor to blink
return self.show_cursor and self.has_focus
draw_cursor = (
self.has_focus
and not self.cursor_blink
or (self.cursor_blink and self._cursor_visible)
)
return draw_cursor
@property
def _has_cursor(self) -> bool:
"""Is there a usable cursor?"""
return not (self.read_only and not self.show_cursor)
def get_line(self, line_index: int) -> Text:
"""Retrieve the line at the given line index.
You can stylize the Text object returned here to apply additional
styling to TextArea content.
Args:
line_index: The index of the line.
Returns:
A `rich.Text` object containing the requested line.
"""
line_string = self.document.get_line(line_index)
return Text(line_string, end="", no_wrap=True)
    def render_lines(self, crop: Region) -> list[Strip]:
        # Apply theme-derived CSS before delegating to the base implementation.
        theme = self._theme
        if theme:
            theme.apply_css(self)
        return super().render_lines(crop)
    def render_line(self, y: int) -> Strip:
        """Render a single line of the TextArea. Called by Textual.

        Args:
            y: Y Coordinate of line relative to the widget region.

        Returns:
            A rendered line.
        """
        # Empty document: render the placeholder instead of document content.
        if not self.text and self.placeholder:
            placeholder_lines = Content.from_text(self.placeholder).wrap(
                self.content_size.width
            )
            if y < len(placeholder_lines):
                style = self.get_visual_style("text-area--placeholder")
                content = placeholder_lines[y].stylize(style)
                # The cursor sits on the first cell of the first placeholder line.
                if self._draw_cursor and y == 0:
                    theme = self._theme
                    cursor_style = theme.cursor_style if theme else None
                    if cursor_style:
                        content = content.stylize(
                            ContentStyle.from_rich_style(cursor_style), 0, 1
                        )
                return Strip(
                    content.render_segments(self.visual_style), content.cell_length
                )
        scroll_x, scroll_y = self.scroll_offset
        absolute_y = scroll_y + y
        selection = self.selection
        _, cursor_y = self._cursor_offset
        # Cache key captures everything that can change this line's rendering;
        # selection and cursor components are reduced so unrelated cursor moves
        # don't invalidate lines they never touched.
        cache_key = (
            self.size,
            scroll_x,
            absolute_y,
            (
                selection
                if selection.contains_line(absolute_y) or self.soft_wrap
                else selection.end[0] == absolute_y
            ),
            (
                selection.end
                if (
                    self._cursor_visible
                    and self.cursor_blink
                    and absolute_y == cursor_y
                )
                else None
            ),
            self.theme,
            self._matching_bracket_location,
            self.match_cursor_bracket,
            self.soft_wrap,
            self.show_line_numbers,
            self.read_only,
            self.show_cursor,
            self.suggestion,
        )
        if (cached_line := self._line_cache.get(cache_key)) is not None:
            return cached_line
        line = self._render_line(y)
        self._line_cache[cache_key] = line
        return line
    def _render_line(self, y: int) -> Strip:
        """Render a single line of the TextArea. Called by Textual.

        This is the uncached rendering path; `render_line` handles caching.

        Args:
            y: Y Coordinate of line relative to the widget region.

        Returns:
            A rendered line.
        """
        theme = self._theme
        base_style = (
            theme.base_style
            if theme and theme.base_style is not None
            else self.rich_style
        )
        wrapped_document = self.wrapped_document
        scroll_x, scroll_y = self.scroll_offset
        # Account for how much the TextArea is scrolled.
        y_offset = y + scroll_y
        # If we're beyond the height of the document, render blank lines
        out_of_bounds = y_offset >= wrapped_document.height
        if out_of_bounds:
            return Strip.blank(self.size.width, base_style)
        # Get the line corresponding to this offset
        try:
            line_info = wrapped_document._offset_to_line_info[y_offset]
        except IndexError:
            line_info = None
        if line_info is None:
            return Strip.blank(self.size.width, base_style)
        # A wrapped line maps to a (document line, wrap section) pair.
        line_index, section_offset = line_info
        line = self.get_line(line_index)
        line_character_count = len(line)
        line.tab_size = self.indent_width
        line.set_length(line_character_count + 1)  # space at end for cursor
        virtual_width, _virtual_height = self.virtual_size
        selection = self.selection
        start, end = selection
        cursor_row, cursor_column = end
        selection_top, selection_bottom = sorted(selection)
        selection_top_row, selection_top_column = selection_top
        selection_bottom_row, selection_bottom_column = selection_bottom
        highlight_cursor_line = self.highlight_cursor_line and self._has_cursor
        cursor_line_style = (
            theme.cursor_line_style if (theme and highlight_cursor_line) else None
        )
        has_cursor = self._has_cursor
        if has_cursor and cursor_line_style and cursor_row == line_index:
            line.stylize(cursor_line_style)
        # Selection styling
        if start != end and selection_top_row <= line_index <= selection_bottom_row:
            # If this row intersects with the selection range
            selection_style = theme.selection_style if theme else None
            cursor_row, _ = end
            if selection_style:
                if line_character_count == 0 and line_index != cursor_row:
                    # A simple highlight to show empty lines are included in the selection
                    line.plain = "▌"
                    line.stylize(Style(color=selection_style.bgcolor))
                else:
                    if line_index == selection_top_row == selection_bottom_row:
                        # Selection within a single line
                        line.stylize(
                            selection_style,
                            start=selection_top_column,
                            end=selection_bottom_column,
                        )
                    else:
                        # Selection spanning multiple lines
                        if line_index == selection_top_row:
                            line.stylize(
                                selection_style,
                                start=selection_top_column,
                                end=line_character_count,
                            )
                        elif line_index == selection_bottom_row:
                            line.stylize(selection_style, end=selection_bottom_column)
                        else:
                            line.stylize(selection_style, end=line_character_count)
        # Syntax highlighting: tree-sitter offsets are byte-based, so translate
        # them to codepoint offsets before stylizing.
        highlights = self._highlights
        if highlights and theme:
            line_bytes = _utf8_encode(line.plain)
            byte_to_codepoint = build_byte_to_codepoint_dict(line_bytes)
            get_highlight_from_theme = theme.syntax_styles.get
            line_highlights = highlights[line_index]
            for highlight_start, highlight_end, highlight_name in line_highlights:
                node_style = get_highlight_from_theme(highlight_name)
                if node_style is not None:
                    line.stylize(
                        node_style,
                        byte_to_codepoint.get(highlight_start, 0),
                        byte_to_codepoint.get(highlight_end) if highlight_end else None,
                    )
        # Highlight the cursor
        matching_bracket = self._matching_bracket_location
        match_cursor_bracket = self.match_cursor_bracket
        draw_matched_brackets = (
            has_cursor
            and match_cursor_bracket
            and matching_bracket is not None
            and start == end
        )
        if cursor_row == line_index:
            draw_cursor = self._draw_cursor
            if draw_matched_brackets:
                matching_bracket_style = theme.bracket_matching_style if theme else None
                if matching_bracket_style:
                    line.stylize(
                        matching_bracket_style,
                        cursor_column,
                        cursor_column + 1,
                    )
            # Inline suggestion (e.g. autocomplete ghost text) at the cursor.
            if self.suggestion and (self.has_focus or not self.hide_suggestion_on_blur):
                suggestion_style = self.get_component_rich_style(
                    "text-area--suggestion"
                )
                line = Text.assemble(
                    line[:cursor_column],
                    (self.suggestion, suggestion_style),
                    line[cursor_column:],
                )
            if draw_cursor:
                cursor_style = theme.cursor_style if theme else None
                if cursor_style:
                    line.stylize(cursor_style, cursor_column, cursor_column + 1)
        # Highlight the partner opening/closing bracket.
        if draw_matched_brackets:
            # mypy doesn't know matching bracket is guaranteed to be non-None
            assert matching_bracket is not None
            bracket_match_row, bracket_match_column = matching_bracket
            if theme and bracket_match_row == line_index:
                matching_bracket_style = theme.bracket_matching_style
                if matching_bracket_style:
                    line.stylize(
                        matching_bracket_style,
                        bracket_match_column,
                        bracket_match_column + 1,
                    )
        # Build the gutter text for this line
        gutter_width = self.gutter_width
        if self.show_line_numbers:
            if cursor_row == line_index and highlight_cursor_line:
                gutter_style = theme.cursor_line_gutter_style
            else:
                gutter_style = theme.gutter_style
            gutter_width_no_margin = gutter_width - 2
            # Only the first wrap section of a line shows its number.
            gutter_content = (
                str(line_index + self.line_number_start) if section_offset == 0 else ""
            )
            gutter = [
                Segment(f"{gutter_content:>{gutter_width_no_margin}} ", gutter_style)
            ]
        else:
            gutter = []
        # TODO: Lets not apply the division each time through render_line.
        #  We should cache sections with the edit counts.
        wrap_offsets = wrapped_document.get_offsets(line_index)
        if wrap_offsets:
            sections = line.divide(wrap_offsets)  # TODO cache result with edit count
            line = sections[section_offset]
            line_tab_widths = wrapped_document.get_tab_widths(line_index)
            line.end = ""
            # Get the widths of the tabs corresponding only to the section of the
            # line that is currently being rendered. We don't care about tabs in
            # other sections of the same line.
            # Count the tabs before this section.
            tabs_before = 0
            for section_index in range(section_offset):
                tabs_before += sections[section_index].plain.count("\t")
            # Count the tabs in this section.
            tabs_within = line.plain.count("\t")
            section_tab_widths = line_tab_widths[
                tabs_before : tabs_before + tabs_within
            ]
            line = expand_text_tabs_from_widths(line, section_tab_widths)
        else:
            line.expand_tabs(self.indent_width)
        base_width = (
            self.scrollable_content_region.size.width
            if self.soft_wrap
            else max(virtual_width, self.region.size.width)
        )
        target_width = base_width - self.gutter_width
        # Crop the line to show only the visible part (some may be scrolled out of view)
        console = self.app.console
        text_strip = Strip(line.render(console), cell_length=line.cell_len)
        if not self.soft_wrap:
            text_strip = text_strip.crop(scroll_x, scroll_x + virtual_width)
        # Stylize the line the cursor is currently on.
        if cursor_row == line_index and self.highlight_cursor_line:
            line_style = cursor_line_style
        else:
            line_style = theme.base_style if theme else None
        # Pad to the target width so the background extends across the widget.
        text_strip = text_strip.extend_cell_length(target_width, line_style)
        if gutter:
            strip = Strip.join([Strip(gutter, cell_length=gutter_width), text_strip])
        else:
            strip = text_strip
        return strip.apply_style(base_style)
    @property
    def text(self) -> str:
        """The entire text content of the document."""
        return self.document.text

    @text.setter
    def text(self, value: str) -> None:
        """Replace the text currently in the TextArea. This is an alias of `load_text`.

        Setting this value will clear the edit history.

        Args:
            value: The text to load into the TextArea.
        """
        self.load_text(value)
@property
def selected_text(self) -> str:
"""The text between the start and end points of the current selection."""
start, end = self.selection
return self.get_text_range(start, end)
    @property
    def matching_bracket_location(self) -> Location | None:
        """The location of the matching bracket, if there is one."""
        return self._matching_bracket_location
def get_text_range(self, start: Location, end: Location) -> str:
"""Get the text between a start and end location.
Args:
start: The start location.
end: The end location.
Returns:
The text between start and end.
"""
start, end = sorted((start, end))
return self.document.get_text_range(start, end)
    def edit(self, edit: Edit) -> EditResult:
        """Perform an Edit.

        Args:
            edit: The Edit to perform.

        Returns:
            Data relating to the edit that may be useful. The data returned
            may be different depending on the edit performed.
        """
        # Consume the matching prefix of the suggestion as the user types it.
        if self.suggestion.startswith(edit.text):
            self.suggestion = self.suggestion[len(edit.text) :]
        else:
            self.suggestion = ""
        old_gutter_width = self.gutter_width
        result = edit.do(self)
        self.history.record(edit)
        new_gutter_width = self.gutter_width
        if old_gutter_width != new_gutter_width:
            # Gutter width changed (e.g. line count crossed a digit boundary),
            # so every line's wrap width changed: re-wrap the whole document.
            self.wrapped_document.wrap(self.wrap_width, self.indent_width)
        else:
            # Otherwise only re-wrap the range the edit touched.
            self.wrapped_document.wrap_range(
                edit.top,
                edit.bottom,
                result.end_location,
            )
        edit.after(self)
        self._build_highlight_map()
        self.post_message(self.Changed(self))
        self.update_suggestion()
        self._refresh_size()
        return result
    def undo(self) -> None:
        """Undo the edits since the last checkpoint (the most recent batch of edits)."""
        if edits := self.history._pop_undo():
            self._undo_batch(edits)
    def action_undo(self) -> None:
        """Undo the edits since the last checkpoint (the most recent batch of edits)."""
        self.undo()
    def redo(self) -> None:
        """Redo the most recently undone batch of edits."""
        if edits := self.history._pop_redo():
            self._redo_batch(edits)
    def action_redo(self) -> None:
        """Redo the most recently undone batch of edits."""
        self.redo()
    def _undo_batch(self, edits: Sequence[Edit]) -> None:
        """Undo a batch of Edits.

        The sequence must be chronologically ordered by edit time.

        There must be no edits missing from the sequence, or the resulting content
        will be incorrect.

        Args:
            edits: The edits to undo, in the order they were originally performed.
        """
        if not edits:
            return
        old_gutter_width = self.gutter_width
        # Track the union of all edited ranges so we can re-wrap it in one pass.
        minimum_top = edits[-1].top
        maximum_old_bottom = (0, 0)
        maximum_new_bottom = (0, 0)
        # Undo in reverse chronological order so each edit sees the document
        # state it originally produced.
        for edit in reversed(edits):
            edit.undo(self)
            end_location = (
                edit._edit_result.end_location if edit._edit_result else (0, 0)
            )
            if edit.top < minimum_top:
                minimum_top = edit.top
            if end_location > maximum_old_bottom:
                maximum_old_bottom = end_location
            if edit.bottom > maximum_new_bottom:
                maximum_new_bottom = edit.bottom
        new_gutter_width = self.gutter_width
        if old_gutter_width != new_gutter_width:
            # Gutter width changed: the wrap width changed for every line.
            self.wrapped_document.wrap(self.wrap_width, self.indent_width)
        else:
            self.wrapped_document.wrap_range(
                minimum_top, maximum_old_bottom, maximum_new_bottom
            )
        self._refresh_size()
        for edit in reversed(edits):
            edit.after(self)
        self._build_highlight_map()
        self.post_message(self.Changed(self))
        self.update_suggestion()
    def _redo_batch(self, edits: Sequence[Edit]) -> None:
        """Redo a batch of Edits in order.

        The sequence must be chronologically ordered by edit time.

        Edits are applied from the start of the sequence to the end.

        There must be no edits missing from the sequence, or the resulting content
        will be incorrect.

        Args:
            edits: The edits to redo.
        """
        if not edits:
            return
        old_gutter_width = self.gutter_width
        # Track the union of all edited ranges so we can re-wrap it in one pass.
        minimum_top = edits[0].top
        maximum_old_bottom = (0, 0)
        maximum_new_bottom = (0, 0)
        for edit in edits:
            # Don't record the selection again; it was captured on first `do`.
            edit.do(self, record_selection=False)
            end_location = (
                edit._edit_result.end_location if edit._edit_result else (0, 0)
            )
            if edit.top < minimum_top:
                minimum_top = edit.top
            if end_location > maximum_new_bottom:
                maximum_new_bottom = end_location
            if edit.bottom > maximum_old_bottom:
                maximum_old_bottom = edit.bottom
        new_gutter_width = self.gutter_width
        if old_gutter_width != new_gutter_width:
            # Gutter width changed: the wrap width changed for every line.
            self.wrapped_document.wrap(self.wrap_width, self.indent_width)
        else:
            self.wrapped_document.wrap_range(
                minimum_top,
                maximum_old_bottom,
                maximum_new_bottom,
            )
        self._refresh_size()
        for edit in edits:
            edit.after(self)
        self._build_highlight_map()
        self.post_message(self.Changed(self))
        self.update_suggestion()
    async def _on_key(self, event: events.Key) -> None:
        """Handle key presses which correspond to document inserts.

        Translates printable keys (plus `enter` and, depending on
        `tab_behavior`, `tab`) into replacements of the current selection.
        """
        self._restart_blink()
        if self.read_only:
            return
        key = event.key
        insert_values = {
            "enter": "\n",
        }
        if self.tab_behavior == "indent":
            if key == "escape":
                # When tab is captured for indentation, escape is the way to
                # move focus out of the widget.
                event.stop()
                event.prevent_default()
                self.screen.focus_next()
                return
            if self.indent_type == "tabs":
                insert_values["tab"] = "\t"
            else:
                # Soft tabs: insert spaces up to the next tab stop.
                insert_values["tab"] = " " * self._find_columns_to_next_tab_stop()
        if event.is_printable or key in insert_values:
            event.stop()
            event.prevent_default()
            insert = insert_values.get(key, event.character)
            # `insert` is not None because event.character cannot be
            # None because we've checked that it's printable.
            assert insert is not None
            start, end = self.selection
            self._replace_via_keyboard(insert, start, end)
def _find_columns_to_next_tab_stop(self) -> int:
"""Get the location of the next tab stop after the cursors position on the current line.
If the cursor is already at a tab stop, this returns the *next* tab stop location.
Returns:
The number of cells to the next tab stop from the current cursor column.
"""
cursor_row, cursor_column = self.cursor_location
line_text = self.document[cursor_row]
indent_width = self.indent_width
if not line_text:
return indent_width
width_before_cursor = self.get_column_width(cursor_row, cursor_column)
spaces_to_insert = indent_width - (
(indent_width + width_before_cursor) % indent_width
)
return spaces_to_insert
    def get_target_document_location(self, event: MouseEvent) -> Location:
        """Given a MouseEvent, return the row and column offset of the event in document-space.

        Args:
            event: The MouseEvent.

        Returns:
            The location of the mouse event within the document.
        """
        scroll_x, scroll_y = self.scroll_offset
        # Convert widget-space coordinates to document-space by removing the
        # gutter (line numbers + padding) and adding the scroll offset.
        target_x = event.x - self.gutter_width + scroll_x - self.gutter.left
        target_y = event.y + scroll_y - self.gutter.top
        location = self.wrapped_document.offset_to_location(Offset(target_x, target_y))
        return location
@property
def gutter_width(self) -> int:
"""The width of the gutter (the left column containing line numbers).
Returns:
The cell-width of the line number column. If `show_line_numbers` is `False` returns 0.
"""
# The longest number in the gutter plus two extra characters: `│ `.
gutter_margin = 2
gutter_width = (
len(str(self.document.line_count - 1 + self.line_number_start))
+ gutter_margin
if self.show_line_numbers
else 0
)
return gutter_width
    def _on_mount(self, event: events.Mount) -> None:
        """Set up screen-level hooks and the cursor blink timer on mount."""
        def text_selection_started(screen: Screen) -> None:
            """Signal callback to unselect when arbitrary text selection starts."""
            self.selection = Selection(self.cursor_location, self.cursor_location)
        self.screen.text_selection_started_signal.subscribe(
            self, text_selection_started, immediate=True
        )
        # When `app.theme` reactive is changed, reset the theme to clear cached styles.
        self.watch(self.app, "theme", self._app_theme_changed, init=False)
        # The blink timer is created paused unless blinking is enabled and the
        # widget already has focus.
        self.blink_timer = self.set_interval(
            0.5,
            self._toggle_cursor_blink_visible,
            pause=not (self.cursor_blink and self.has_focus),
        )
    def _toggle_cursor_blink_visible(self) -> None:
        """Toggle visibility of the cursor for the purposes of 'cursor blink'."""
        if not self.screen.is_active:
            # Don't burn refreshes blinking on an inactive screen.
            return
        self._cursor_visible = not self._cursor_visible
        _, cursor_y = self._cursor_offset
        # Only the cursor's own line needs repainting.
        self.refresh_lines(cursor_y)
    def _watch__cursor_visible(self) -> None:
        """When the cursor visibility is toggled, ensure the row is refreshed."""
        # Reactive watcher: repaint just the line the cursor occupies.
        _, cursor_y = self._cursor_offset
        self.refresh_lines(cursor_y)
    def _restart_blink(self) -> None:
        """Reset the cursor blink timer.

        Makes the cursor immediately visible and restarts the blink interval,
        so the cursor does not blink away mid-keystroke.
        """
        if self.cursor_blink:
            self._cursor_visible = True
            if self.is_mounted:
                self.blink_timer.reset()
    def _pause_blink(self, visible: bool = True) -> None:
        """Pause the cursor blinking but ensure it stays visible.

        Args:
            visible: The cursor visibility to hold while blinking is paused.
        """
        self._cursor_visible = visible
        if self.is_mounted:
            self.blink_timer.pause()
    async def _on_mouse_down(self, event: events.MouseDown) -> None:
        """Update the cursor position, and begin a selection using the mouse."""
        target = self.get_target_document_location(event)
        self.selection = Selection.cursor(target)
        self._selecting = True
        # Capture the mouse so that if the cursor moves outside the
        # TextArea widget while selecting, the widget still scrolls.
        self.capture_mouse()
        self._pause_blink(visible=False)
        # Checkpoint so edits made after the click undo as a separate step.
        self.history.checkpoint()
    async def _on_mouse_move(self, event: events.MouseMove) -> None:
        """Handles click and drag to expand and contract the selection."""
        if self._selecting:
            target = self.get_target_document_location(event)
            # Keep the anchor where the drag began; only the end follows the mouse.
            selection_start, _ = self.selection
            self.selection = Selection(selection_start, target)
    def _end_mouse_selection(self) -> None:
        """Finalize the selection that has been made using the mouse.

        Releases the mouse capture taken in `_on_mouse_down` and resumes
        cursor blinking.
        """
        if self._selecting:
            self._selecting = False
            self.release_mouse()
            self.record_cursor_width()
            self._restart_blink()
    async def _on_mouse_up(self, event: events.MouseUp) -> None:
        """Finalize the selection that has been made using the mouse."""
        self._end_mouse_selection()
    async def _on_hide(self, event: events.Hide) -> None:
        """Finalize the selection that has been made using the mouse when the widget is hidden."""
        self._end_mouse_selection()
    async def _on_paste(self, event: events.Paste) -> None:
        """When a paste occurs, insert the text from the paste event into the document."""
        if self.read_only:
            return
        # Replace the current selection with the pasted text; `None` result
        # means the edit was suppressed (e.g. read-only).
        if result := self._replace_via_keyboard(event.text, *self.selection):
            self.move_cursor(result.end_location)
        self.focus()
    def cell_width_to_column_index(self, cell_width: int, row_index: int) -> int:
        """Return the column that the cell width corresponds to on the given row.

        Args:
            cell_width: The cell width to convert.
            row_index: The index of the row to examine.

        Returns:
            The column corresponding to the cell width on that row.
        """
        # Delegates to the module-level helper, which accounts for tab expansion.
        line = self.document[row_index]
        return cell_width_to_column_index(line, cell_width, self.indent_width)
def clamp_visitable(self, location: Location) -> Location:
"""Clamp the given location to the nearest visitable location.
Args:
location: The location to clamp.
Returns:
The nearest location that we could conceivably navigate to using the cursor.
"""
document = self.document
row, column = location
try:
line_text = document[row]
except IndexError:
line_text = ""
row = clamp(row, 0, document.line_count - 1)
column = clamp(column, 0, len(line_text))
return row, column
    # --- Cursor/selection utilities
    def scroll_cursor_visible(
        self, center: bool = False, animate: bool = False
    ) -> Offset:
        """Scroll the `TextArea` such that the cursor is visible on screen.

        Args:
            center: True if the cursor should be scrolled to the center.
            animate: True if we should animate while scrolling.

        Returns:
            The offset that was scrolled to bring the cursor into view.
        """
        if not self._has_cursor:
            return Offset(0, 0)
        self._recompute_cursor_offset()
        x, y = self._cursor_offset
        # Scroll to a slightly widened 3-cell region, reserving the gutter
        # width on the right so the cursor never hides behind line numbers.
        scroll_offset = self.scroll_to_region(
            Region(x, y, width=3, height=1),
            spacing=Spacing(right=self.gutter_width),
            animate=animate,
            force=True,
            center=center,
        )
        return scroll_offset
    def move_cursor(
        self,
        location: Location,
        select: bool = False,
        center: bool = False,
        record_width: bool = True,
    ) -> None:
        """Move the cursor to a location.

        Args:
            location: The location to move the cursor to.
            select: If True, select text between the old and new location.
            center: If True, scroll such that the cursor is centered.
            record_width: If True, record the cursor column cell width after navigating
                so that we jump back to the same width the next time we move to a row
                that is wide enough.
        """
        if not self._has_cursor:
            return
        if select:
            # Keep the existing anchor and move only the active end.
            start, _end = self.selection
            self.selection = Selection(start, location)
        else:
            self.selection = Selection.cursor(location)
        if record_width:
            self.record_cursor_width()
        if center:
            self.scroll_cursor_visible(center)
        # Cursor movement closes the current undo batch.
        self.history.checkpoint()
    def move_cursor_relative(
        self,
        rows: int = 0,
        columns: int = 0,
        select: bool = False,
        center: bool = False,
        record_width: bool = True,
    ) -> None:
        """Move the cursor relative to its current location in document-space.

        Args:
            rows: The number of rows to move down by (negative to move up)
            columns: The number of columns to move right by (negative to move left)
            select: If True, select text between the old and new location.
            center: If True, scroll such that the cursor is centered.
            record_width: If True, record the cursor column cell width after navigating
                so that we jump back to the same width the next time we move to a row
                that is wide enough.
        """
        clamp_visitable = self.clamp_visitable
        # The relative movement is applied to the *active* end of the selection.
        _start, end = self.selection
        current_row, current_column = end
        target = clamp_visitable((current_row + rows, current_column + columns))
        self.move_cursor(target, select, center, record_width)
    def select_line(self, index: int) -> None:
        """Select all the text in the specified line.

        Out-of-range indices are ignored.

        Args:
            index: The index of the line to select (starting from 0).
        """
        try:
            line = self.document[index]
        except IndexError:
            return
        else:
            self.selection = Selection((index, 0), (index, len(line)))
            self.record_cursor_width()
def action_select_line(self) -> None:
"""Select all the text on the current line."""
cursor_row, _ = self.cursor_location
self.select_line(cursor_row)
def select_all(self) -> None:
"""Select all of the text in the `TextArea`."""
last_line = self.document.line_count - 1
length_of_last_line = len(self.document[last_line])
selection_start = (0, 0)
selection_end = (last_line, length_of_last_line)
self.selection = Selection(selection_start, selection_end)
self.record_cursor_width()
    def action_select_all(self) -> None:
        """Select all the text in the document."""
        # Key-binding wrapper around the public `select_all` API.
        self.select_all()
    @property
    def cursor_location(self) -> Location:
        """The current location of the cursor in the document.

        This is a utility for accessing the `end` of `TextArea.selection`.
        """
        return self.selection.end
    @cursor_location.setter
    def cursor_location(self, location: Location) -> None:
        """Set the cursor_location to a new location.

        If a selection is in progress, the anchor point will remain.
        """
        # Only extend the selection when one already exists.
        self.move_cursor(location, select=not self.selection.is_empty)
    @property
    def cursor_screen_offset(self) -> Offset:
        """The offset of the cursor relative to the screen."""
        cursor_x, cursor_y = self._cursor_offset
        scroll_x, scroll_y = self.scroll_offset
        region_x, region_y, _width, _height = self.content_region
        # Document-space offset -> screen-space: add the content region origin,
        # subtract the scroll, and shift right past the gutter.
        offset_x = region_x + cursor_x - scroll_x + self.gutter_width
        offset_y = region_y + cursor_y - scroll_y
        return Offset(offset_x, offset_y)
    @property
    def cursor_at_first_line(self) -> bool:
        """True if and only if the cursor is on the first line (row 0) of the document."""
        return self.selection.end[0] == 0
    @property
    def cursor_at_last_line(self) -> bool:
        """True if and only if the cursor is on the last line of the document."""
        return self.selection.end[0] == self.document.line_count - 1
    @property
    def cursor_at_start_of_line(self) -> bool:
        """True if and only if the cursor is at column 0 of its current row."""
        return self.selection.end[1] == 0
@property
def cursor_at_end_of_line(self) -> bool:
"""True if and only if the cursor is at the end of a row."""
cursor_row, cursor_column = self.selection.end
row_length = len(self.document[cursor_row])
cursor_at_end = cursor_column == row_length
return cursor_at_end
    @property
    def cursor_at_start_of_text(self) -> bool:
        """True if and only if the cursor is at location (0, 0)"""
        return self.selection.end == (0, 0)
    @property
    def cursor_at_end_of_text(self) -> bool:
        """True if and only if the cursor is at the very end of the document."""
        # End of document == last line AND end of that line.
        return self.cursor_at_last_line and self.cursor_at_end_of_line
    # ------ Cursor movement actions
    def action_cursor_left(self, select: bool = False) -> None:
        """Move the cursor one location to the left.

        If the cursor is at the left edge of the document, try to move it to
        the end of the previous line.

        If text is selected, move the cursor to the start of the selection.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            # No cursor (e.g. read-only display mode): just scroll the view.
            self.scroll_left()
            return
        target = (
            self.get_cursor_left_location()
            if select or self.selection.is_empty
            # A plain left-arrow on a selection collapses to its leftmost edge.
            else min(*self.selection)
        )
        self.move_cursor(target, select=select)
    def get_cursor_left_location(self) -> Location:
        """Get the location the cursor will move to if it moves left.

        Returns:
            The location of the cursor if it moves left.
        """
        return self.navigator.get_location_left(self.cursor_location)
    def action_cursor_right(self, select: bool = False) -> None:
        """Move the cursor one location to the right.

        If the cursor is at the end of a line, attempt to go to the start of the next line.

        If text is selected, move the cursor to the end of the selection.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            self.scroll_right()
            return
        if self.suggestion:
            # A pending completion suggestion is accepted with right-arrow.
            self.insert(self.suggestion)
            return
        target = (
            self.get_cursor_right_location()
            if select or self.selection.is_empty
            # A plain right-arrow on a selection collapses to its rightmost edge.
            else max(*self.selection)
        )
        self.move_cursor(target, select=select)
    def get_cursor_right_location(self) -> Location:
        """Get the location the cursor will move to if it moves right.

        Returns:
            the location the cursor will move to if it moves right.
        """
        return self.navigator.get_location_right(self.cursor_location)
    def action_cursor_down(self, select: bool = False) -> None:
        """Move the cursor down one cell.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            self.scroll_down()
            return
        target = self.get_cursor_down_location()
        # record_width=False: keep the remembered column so vertical movement
        # through short lines returns to the original column.
        self.move_cursor(target, record_width=False, select=select)
    def get_cursor_down_location(self) -> Location:
        """Get the location the cursor will move to if it moves down.

        Returns:
            The location the cursor will move to if it moves down.
        """
        return self.navigator.get_location_below(self.cursor_location)
    def action_cursor_up(self, select: bool = False) -> None:
        """Move the cursor up one cell.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            self.scroll_up()
            return
        target = self.get_cursor_up_location()
        # record_width=False: preserve the remembered column for vertical moves.
        self.move_cursor(target, record_width=False, select=select)
    def get_cursor_up_location(self) -> Location:
        """Get the location the cursor will move to if it moves up.

        Returns:
            The location the cursor will move to if it moves up.
        """
        return self.navigator.get_location_above(self.cursor_location)
    def action_cursor_line_end(self, select: bool = False) -> None:
        """Move the cursor to the end of the line.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            self.scroll_end()
            return
        location = self.get_cursor_line_end_location()
        self.move_cursor(location, select=select)
    def get_cursor_line_end_location(self) -> Location:
        """Get the location of the end of the current line.

        Returns:
            The (row, column) location of the end of the cursors current line.
        """
        return self.navigator.get_location_end(self.cursor_location)
    def action_cursor_line_start(self, select: bool = False) -> None:
        """Move the cursor to the start of the line.

        Args:
            select: If True, select the text while moving.
        """
        if not self._has_cursor:
            self.scroll_home()
            return
        # smart_home=True: jump to the first non-whitespace character first.
        target = self.get_cursor_line_start_location(smart_home=True)
        self.move_cursor(target, select=select)
    def get_cursor_line_start_location(self, smart_home: bool = False) -> Location:
        """Get the location of the start of the current line.

        Args:
            smart_home: If True, use "smart home key" behavior - go to the first
                non-whitespace character on the line, and if already there, go to
                offset 0. Smart home only works when wrapping is disabled.

        Returns:
            The (row, column) location of the start of the cursors current line.
        """
        return self.navigator.get_location_home(
            self.cursor_location, smart_home=smart_home
        )
    def action_cursor_word_left(self, select: bool = False) -> None:
        """Move the cursor left by a single word, skipping trailing whitespace.

        Args:
            select: Whether to select while moving the cursor.
        """
        # NOTE(review): this guards on `show_cursor` while the arrow-key
        # actions guard on `_has_cursor` — confirm the inconsistency is intended.
        if not self.show_cursor:
            return
        if self.cursor_at_start_of_text:
            return
        target = self.get_cursor_word_left_location()
        self.move_cursor(target, select=select)
def get_cursor_word_left_location(self) -> Location:
"""Get the location the cursor will jump to if it goes 1 word left.
Returns:
The location the cursor will jump on "jump word left".
"""
cursor_row, cursor_column = self.cursor_location
if cursor_row > 0 and cursor_column == 0:
# Going to the previous row
return cursor_row - 1, len(self.document[cursor_row - 1])
# Staying on the same row
line = self.document[cursor_row][:cursor_column]
search_string = line.rstrip()
matches = list(re.finditer(self._word_pattern, search_string))
cursor_column = matches[-1].start() if matches else 0
return cursor_row, cursor_column
    def action_cursor_word_right(self, select: bool = False) -> None:
        """Move the cursor right by a single word, skipping leading whitespace.

        Args:
            select: Whether to select while moving the cursor.
        """
        if not self.show_cursor:
            return
        if self.cursor_at_end_of_text:
            return
        target = self.get_cursor_word_right_location()
        self.move_cursor(target, select=select)
def get_cursor_word_right_location(self) -> Location:
"""Get the location the cursor will jump to if it goes 1 word right.
Returns:
The location the cursor will jump on "jump word right".
"""
cursor_row, cursor_column = self.selection.end
line = self.document[cursor_row]
if cursor_row < self.document.line_count - 1 and cursor_column == len(line):
# Moving to the line below
return cursor_row + 1, 0
# Staying on the same line
search_string = line[cursor_column:]
pre_strip_length = len(search_string)
search_string = search_string.lstrip()
strip_offset = pre_strip_length - len(search_string)
matches = list(re.finditer(self._word_pattern, search_string))
if matches:
cursor_column += matches[0].start() + strip_offset
else:
cursor_column = len(line)
return cursor_row, cursor_column
    def action_cursor_page_up(self) -> None:
        """Move the cursor and scroll up one page."""
        if not self.show_cursor:
            self.scroll_page_up()
            return
        height = self.content_size.height
        _, cursor_location = self.selection
        # Move the cursor a full viewport-height of *visual* rows upwards.
        target = self.navigator.get_location_at_y_offset(
            cursor_location,
            -height,
        )
        # Scroll first (unanimated) so the subsequent cursor move lands in view.
        self.scroll_relative(y=-height, animate=False)
        self.move_cursor(target)
    def action_cursor_page_down(self) -> None:
        """Move the cursor and scroll down one page."""
        if not self.show_cursor:
            self.scroll_page_down()
            return
        height = self.content_size.height
        _, cursor_location = self.selection
        # Move the cursor a full viewport-height of *visual* rows downwards.
        target = self.navigator.get_location_at_y_offset(
            cursor_location,
            height,
        )
        # Scroll first (unanimated) so the subsequent cursor move lands in view.
        self.scroll_relative(y=height, animate=False)
        self.move_cursor(target)
    def get_column_width(self, row: int, column: int) -> int:
        """Get the cell offset of the column from the start of the row.

        Args:
            row: The row index.
            column: The column index (codepoint offset from start of row).

        Returns:
            The cell width of the column relative to the start of the row.
        """
        # Expand tabs before measuring so tab characters count as full stops.
        line = self.document[row]
        return cell_len(expand_tabs_inline(line[:column], self.indent_width))
    def record_cursor_width(self) -> None:
        """Record the current cell width of the cursor.

        This is used where we navigate up and down through rows.
        If we're in the middle of a row, and go down to a row with no
        content, then we go down to another row, we want our cursor to
        jump back to the same offset that we were originally at.
        """
        cursor_x_offset, _ = self.wrapped_document.location_to_offset(
            self.cursor_location
        )
        # The navigator reads this back when computing vertical movement targets.
        self.navigator.last_x_offset = cursor_x_offset
    # --- Editor operations
    def insert(
        self,
        text: str,
        location: Location | None = None,
        *,
        maintain_selection_offset: bool = True,
    ) -> EditResult:
        """Insert text into the document.

        Args:
            text: The text to insert.
            location: The location to insert text, or None to use the cursor location.
            maintain_selection_offset: If True, the active Selection will be updated
                such that the same text is selected before and after the selection,
                if possible. Otherwise, the cursor will jump to the end point of the
                edit.

        Returns:
            An `EditResult` containing information about the edit.
        """
        # NOTE(review): the blink timer is only restarted for multi-character
        # inserts; single keystrokes restart it via `_on_key` — confirm intended.
        if len(text) > 1:
            self._restart_blink()
        if location is None:
            location = self.cursor_location
        return self.edit(Edit(text, location, location, maintain_selection_offset))
def delete(
self,
start: Location,
end: Location,
*,
maintain_selection_offset: bool = True,
) -> EditResult:
"""Delete the text between two locations in the document.
Args:
start: The start location.
end: The end location.
maintain_selection_offset: If True, the active Selection will be updated
such that the same text is selected before and after the selection,
if possible. Otherwise, the cursor will jump to the end point of the
edit.
Returns:
An `EditResult` containing information about the edit.
"""
return self.edit(Edit("", start, end, maintain_selection_offset))
def replace(
self,
insert: str,
start: Location,
end: Location,
*,
maintain_selection_offset: bool = True,
) -> EditResult:
"""Replace text in the document with new text.
Args:
insert: The text to insert.
start: The start location
end: The end location.
maintain_selection_offset: If True, the active Selection will be updated
such that the same text is selected before and after the selection,
if possible. Otherwise, the cursor will jump to the end point of the
edit.
Returns:
An `EditResult` containing information about the edit.
"""
return self.edit(Edit(insert, start, end, maintain_selection_offset))
def clear(self) -> EditResult:
"""Delete all text from the document.
Returns:
An EditResult relating to the deletion of all content.
"""
return self.delete((0, 0), self.document.end, maintain_selection_offset=False)
def _delete_via_keyboard(
self,
start: Location,
end: Location,
) -> EditResult | None:
"""Handle a deletion performed using a keyboard (as opposed to the API).
Args:
start: The start location of the text to delete.
end: The end location of the text to delete.
Returns:
An EditResult or None if no edit was performed (e.g. on read-only mode).
"""
if self.read_only:
return None
return self.delete(start, end, maintain_selection_offset=False)
def _replace_via_keyboard(
self,
insert: str,
start: Location,
end: Location,
) -> EditResult | None:
"""Handle a replacement performed using a keyboard (as opposed to the API).
Args:
insert: The text to insert into the document.
start: The start location of the text to replace.
end: The end location of the text to replace.
Returns:
An EditResult or None if no edit was performed (e.g. on read-only mode).
"""
if self.read_only:
return None
return self.replace(insert, start, end, maintain_selection_offset=False)
def action_delete_left(self) -> None:
"""Deletes the character to the left of the cursor and updates the cursor location.
If there's a selection, then the selected range is deleted."""
if self.read_only:
return
selection = self.selection
start, end = selection
if selection.is_empty:
end = self.get_cursor_left_location()
self._delete_via_keyboard(start, end)
def action_delete_right(self) -> None:
"""Deletes the character to the right of the cursor and keeps the cursor at the same location.
If there's a selection, then the selected range is deleted."""
if self.read_only:
return
selection = self.selection
start, end = selection
if selection.is_empty:
end = self.get_cursor_right_location()
self._delete_via_keyboard(start, end)
def action_delete_line(self) -> None:
"""Deletes the lines which intersect with the selection."""
if self.read_only:
return
self._delete_cursor_line()
    def _delete_cursor_line(self) -> EditResult | None:
        """Deletes the line (including the line terminator) that the cursor is on.

        Returns:
            The result of the deletion, or None if no edit was performed.
        """
        start, end = self.selection
        start, end = sorted((start, end))
        start_row, _start_column = start
        end_row, end_column = end
        # Editors generally only delete the final line of the selection when
        # the selection actually extends into it (i.e. the cursor is not at
        # column 0 of that line).
        if start_row != end_row and end_column == 0 and end_row >= 0:
            end_row -= 1
        from_location = (start_row, 0)
        to_location = (end_row + 1, 0)
        deletion = self._delete_via_keyboard(from_location, to_location)
        if deletion is not None:
            # Restore the cursor's column on the line that moved up.
            self.move_cursor_relative(columns=end_column, record_width=False)
        return deletion
    def action_cut(self) -> None:
        """Cut text (remove and copy to clipboard).

        With an empty selection, the whole cursor line is cut instead.
        """
        if self.read_only:
            return
        start, end = self.selection
        if start == end:
            edit_result = self._delete_cursor_line()
        else:
            edit_result = self._delete_via_keyboard(start, end)
        if edit_result is not None:
            # Only copy when something was actually removed.
            self.app.copy_to_clipboard(edit_result.replaced_text)
    def action_copy(self) -> None:
        """Copy selection to clipboard."""
        selected_text = self.selected_text
        if selected_text:
            self.app.copy_to_clipboard(selected_text)
        else:
            # Nothing selected: skip so the key binding can be handled
            # elsewhere (presumably bubbles up — confirm SkipAction semantics).
            raise SkipAction()
    def action_paste(self) -> None:
        """Paste from local clipboard."""
        if self.read_only:
            return
        # Uses the app-level clipboard, not the system clipboard.
        clipboard = self.app.clipboard
        if result := self._replace_via_keyboard(clipboard, *self.selection):
            self.move_cursor(result.end_location)
def action_delete_to_start_of_line(self) -> None:
"""Deletes from the cursor location to the start of the line."""
from_location = self.selection.end
to_location = self.get_cursor_line_start_location()
self._delete_via_keyboard(from_location, to_location)
def action_delete_to_end_of_line(self) -> None:
"""Deletes from the cursor location to the end of the line."""
from_location = self.selection.end
to_location = self.get_cursor_line_end_location()
self._delete_via_keyboard(from_location, to_location)
    async def action_delete_to_end_of_line_or_delete_line(self) -> None:
        """Deletes from the cursor location to the end of the line, or deletes the line.

        The line will be deleted if the line is empty.
        """
        # Assume we're just going to delete to the end of the line.
        action = "delete_to_end_of_line"
        if self.get_cursor_line_start_location() == self.get_cursor_line_end_location():
            # The line is empty, so we'll simply remove the line itself.
            action = "delete_line"
        elif (
            self.selection.start
            == self.selection.end
            == self.get_cursor_line_end_location()
        ):
            # We're at the end of the line, so the kill delete operation
            # should join the next line to this.
            action = "delete_right"
        # Dispatch through the action system rather than calling directly.
        await self.run_action(action)
    def action_delete_word_left(self) -> None:
        """Deletes the word to the left of the cursor and updates the cursor location."""
        if self.cursor_at_start_of_text:
            return
        # If there's a non-zero selection, then "delete word left" typically only
        # deletes the characters within the selection range, ignoring word boundaries.
        start, end = self.selection
        if start != end:
            self._delete_via_keyboard(start, end)
            return
        # Empty selection: delete back to the previous word boundary.
        to_location = self.get_cursor_word_left_location()
        self._delete_via_keyboard(self.selection.end, to_location)
    def action_delete_word_right(self) -> None:
        """Deletes the word to the right of the cursor and keeps the cursor at the same location.

        Note that the location that we delete to using this action is not the same
        as the location we move to when we move the cursor one word to the right.
        This action does not skip leading whitespace, whereas cursor movement does.
        """
        if self.cursor_at_end_of_text:
            return
        start, end = self.selection
        if start != end:
            # Non-empty selection: just delete the selected range.
            self._delete_via_keyboard(start, end)
            return
        cursor_row, cursor_column = end
        # Check the current line for a word boundary
        line = self.document[cursor_row][cursor_column:]
        matches = list(re.finditer(self._word_pattern, line))
        current_row_length = len(self.document[cursor_row])
        if matches:
            # Delete up to (and including) the end of the next word.
            to_location = (cursor_row, cursor_column + matches[0].end())
        elif (
            cursor_row < self.document.line_count - 1
            and cursor_column == current_row_length
        ):
            # At the end of the line: join the next line onto this one.
            to_location = (cursor_row + 1, 0)
        else:
            # No word ahead: delete the remainder of the line.
            to_location = (cursor_row, current_row_length)
        self._delete_via_keyboard(end, to_location)
@lru_cache(maxsize=128)
def build_byte_to_codepoint_dict(data: bytes) -> dict[int, int]:
    """Build a mapping of utf-8 byte offsets to codepoint offsets for the given data.

    Args:
        data: utf-8 bytes.

    Returns:
        A `dict[int, int]` mapping byte indices to codepoint indices within `data`.

    Raises:
        ValueError: If a byte is not a valid UTF-8 sequence lead byte.
    """
    mapping: dict[int, int] = {}
    byte_index = 0
    codepoint_index = 0
    total_bytes = len(data)
    while byte_index < total_bytes:
        mapping[byte_index] = codepoint_index
        lead = data[byte_index]
        # The high bits of the lead byte encode the sequence length.
        if lead < 0b10000000:
            step = 1
        elif lead & 0b11100000 == 0b11000000:
            step = 2
        elif lead & 0b11110000 == 0b11100000:
            step = 3
        elif lead & 0b11111000 == 0b11110000:
            step = 4
        else:
            # A continuation byte (or invalid lead) in lead position.
            raise ValueError(f"Invalid UTF-8 byte: {lead}")
        byte_index += step
        codepoint_index += 1
    # Mapping for the end of the string
    mapping[byte_index] = codepoint_index
    return mapping
| TextArea |
python | fluentpython__example-code-2e | 24-class-metaprog/timeslice.py | {
"start": 461,
"end": 1803
} | class ____():
def __init__(self, arg):
if isinstance(arg, slice):
h = arg.start or 0
m = arg.stop or 0
s = arg.step or 0
else:
h, m, s = 0, 0, arg
if m in (AM, PM):
self.pm = m == PM
m = 0
elif s in (AM, PM):
self.pm = s == PM
s = 0
else:
self.pm = None
self.h, self.m, self.s = h, m, s
def __class_getitem__(cls, arg):
return cls(arg)
def __getitem__(self, arg):
return(type(self)(arg))
def __repr__(self):
h, m, s = self.h, self.m, self.s or None
if m == 0:
m = f'OO'
elif m < 10:
m = f'O{m}'
s = '' if s is None else s
if self.pm is None:
pm = ''
else:
pm = ':' + ('AM', 'PM')[self.pm]
return f'T[{h}:{m}{s}{pm}]'
def __iter__(self):
yield from (self.h, self.m, self.s)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __lt__(self, other):
return tuple(self) < tuple(other)
def __add__(self, other):
"""
>>> T[11:O5:AM] + 15 # TODO: preserve pm field
T[11:20]
"""
if isinstance(other, int):
return self[self.h:self.m + other:self.pm]
| T |
python | kamyu104__LeetCode-Solutions | Python/toeplitz-matrix.py | {
"start": 33,
"end": 345
} | class ____(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
return all(i == 0 or j == 0 or matrix[i-1][j-1] == val
for i, row in enumerate(matrix)
for j, val in enumerate(row))
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 194834,
"end": 197929
} | class ____(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.cached_session():
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
def testNoConvert(self):
# Tests with Tensor.op requires a graph.
with ops.Graph().as_default():
# Make sure converting to the same data type creates only an identity op
with self.cached_session():
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEqual(y.op.type, "Identity")
self.assertEqual(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
# Make sure converting to between integer types scales appropriately
with self.cached_session():
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
self._convert([0, 2**32], dtypes.int64, dtypes.int32, [0, 1])
self._convert([0, 1], dtypes.int32, dtypes.int64, [0, 2**32])
def testConvertBetweenFloat(self):
# Make sure converting to between float types does nothing interesting
with self.cached_session():
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.cached_session():
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.cached_session():
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8, [0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16, [0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8, [0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16, [0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16, [0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16, [0, 255 * 256])
| ConvertImageTest |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_poly_loading.py | {
"start": 6371,
"end": 8495
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
adata = Column(String(50))
type = Column(String(50))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "a",
}
class ASub(A):
__tablename__ = "asub"
id = Column(ForeignKey("a.id"), primary_key=True)
asubdata = Column(String(50))
__mapper_args__ = {
"polymorphic_load": "selectin",
"polymorphic_identity": "asub",
}
@classmethod
def insert_data(cls, connection):
ASub = cls.classes.ASub
s = Session(connection)
s.add_all(
[
ASub(id=i, adata=f"adata {i}", asubdata=f"asubdata {i}")
for i in range(1, 1255)
]
)
s.commit()
def test_chunking(self):
A = self.classes.A
s = fixture_session()
with self.sql_execution_asserter(testing.db) as asserter:
asubs = s.scalars(select(A).order_by(A.id))
eq_(len(asubs.all()), 1254)
poly_load_sql = (
"SELECT asub.id AS asub_id, a.id AS a_id, a.type AS a_type, "
"asub.asubdata AS asub_asubdata FROM a JOIN asub "
"ON a.id = asub.id WHERE a.id "
"IN (__[POSTCOMPILE_primary_keys]) ORDER BY a.id"
)
asserter.assert_(
CompiledSQL(
"SELECT a.id, a.adata, a.type FROM a ORDER BY a.id", []
),
CompiledSQL(
poly_load_sql, [{"primary_keys": list(range(1, 501))}]
),
CompiledSQL(
poly_load_sql, [{"primary_keys": list(range(501, 1001))}]
),
CompiledSQL(
poly_load_sql, [{"primary_keys": list(range(1001, 1255))}]
),
)
| ChunkingTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1598908,
"end": 1599126
} | class ____(sgqlc.types.Union):
"""The results of a search."""
__schema__ = github_schema
__types__ = (App, Discussion, Issue, MarketplaceListing, Organization, PullRequest, Repository, User)
| SearchResultItem |
python | keras-team__keras | keras/src/ops/image_test.py | {
"start": 8937,
"end": 34107
} | class ____(testing.TestCase):
def setUp(self):
# Defaults to channels_last
self.data_format = backend.image_data_format()
backend.set_image_data_format("channels_last")
return super().setUp()
def tearDown(self):
backend.set_image_data_format(self.data_format)
return super().tearDown()
def test_rgb_to_grayscale(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
out = kimage.rgb_to_grayscale(x)
self.assertEqual(out.shape, (20, 20, 1))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
out = kimage.rgb_to_grayscale(x)
self.assertEqual(out.shape, (1, 20, 20))
def test_rgb_to_hsv(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
out = kimage.rgb_to_hsv(x)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
out = kimage.rgb_to_hsv(x)
self.assertEqual(out.shape, (3, 20, 20))
def test_hsv_to_rgb(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
out = kimage.hsv_to_rgb(x)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
out = kimage.hsv_to_rgb(x)
self.assertEqual(out.shape, (3, 20, 20))
def test_resize(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (15, 15, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
out = kimage.resize(x, size=(15, 15))
self.assertEqual(out.shape, (3, 15, 15))
def test_affine_transform(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
transform = KerasTensor([8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
transform = KerasTensor([8])
out = kimage.affine_transform(x, transform)
self.assertEqual(out.shape, (3, 20, 20))
def test_extract_patches(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (4, 4, 75))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (4, 4, 75))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
p_h, p_w = 5, 5
out = kimage.extract_patches(x, (p_h, p_w))
self.assertEqual(out.shape, (75, 4, 4))
out = kimage.extract_patches(x, 5)
self.assertEqual(out.shape, (75, 4, 4))
def test_extract_patches_3d(self):
# Test channels_last
x = KerasTensor([20, 20, 20, 3])
p_d, p_h, p_w = 5, 5, 5
out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
self.assertEqual(out.shape, (4, 4, 4, 375))
out = kimage.extract_patches_3d(x, 5)
self.assertEqual(out.shape, (4, 4, 4, 375))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20, 20])
p_d, p_h, p_w = 5, 5, 5
out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
self.assertEqual(out.shape, (375, 4, 4, 4))
out = kimage.extract_patches_3d(x, 5)
self.assertEqual(out.shape, (375, 4, 4, 4))
def test_map_coordinates(self):
input = KerasTensor([20, 20, 3])
coordinates = KerasTensor([3, 15, 15, 3])
out = kimage.map_coordinates(input, coordinates, 0)
self.assertEqual(out.shape, coordinates.shape[1:])
def test_map_coordinates_uint8(self):
image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
if backend.backend() != "tensorflow":
pytest.skip("Skipping test because the backend is not TensorFlow.")
out = kimage.map_coordinates(
image_uint8, coordinates, order=1, fill_mode="constant"
)
assert out.shape == coordinates.shape[1:]
def test_map_coordinates_float32(self):
image_float32 = tf.ones((1, 1, 3), dtype=tf.float32)
coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
if backend.backend() != "tensorflow":
pytest.skip("Skipping test because the backend is not TensorFlow.")
out = kimage.map_coordinates(
image_float32, coordinates, order=1, fill_mode="constant"
)
assert out.shape == coordinates.shape[1:]
def test_map_coordinates_nearest(self):
image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
if backend.backend() != "tensorflow":
pytest.skip("Skipping test because the backend is not TensorFlow.")
out = kimage.map_coordinates(
image_uint8, coordinates, order=1, fill_mode="nearest"
)
assert out.shape == coordinates.shape[1:]
def test_map_coordinates_manual_cast(self):
image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
image_uint8_casted = tf.cast(image_uint8, dtype=tf.float32)
if backend.backend() != "tensorflow":
pytest.skip("Skipping test because the backend is not TensorFlow.")
out = tf.cast(
kimage.map_coordinates(
image_uint8_casted, coordinates, order=1, fill_mode="constant"
),
dtype=tf.uint8,
)
assert out.shape == coordinates.shape[1:]
def test_pad_images(self):
# Test channels_last
x = KerasTensor([15, 25, 3])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (20, 30, 3))
x_batch = KerasTensor([2, 15, 25, 3])
out_batch = kimage.pad_images(
x_batch, 2, 3, target_height=20, target_width=30
)
self.assertEqual(out_batch.shape, (2, 20, 30, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 15, 25])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (3, 20, 30))
x_batch = KerasTensor([2, 3, 15, 25])
out_batch = kimage.pad_images(
x_batch, 2, 3, target_height=20, target_width=30
)
self.assertEqual(out_batch.shape, (2, 3, 20, 30))
def test_crop_images(self):
# Test channels_last
x = KerasTensor([15, 25, 3])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (10, 20, 3))
x_batch = KerasTensor([2, 15, 25, 3])
out_batch = kimage.crop_images(
x_batch, 2, 3, target_height=10, target_width=20
)
self.assertEqual(out_batch.shape, (2, 10, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 15, 25])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (3, 10, 20))
# Test channels_first and batched
x_batch = KerasTensor([2, 3, 15, 25])
out_batch = kimage.crop_images(
x_batch, 2, 3, target_height=10, target_width=20
)
self.assertEqual(out_batch.shape, (2, 3, 10, 20))
def test_perspective_transform(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
start_points = KerasTensor([4, 2])
end_points = KerasTensor([4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
start_points = KerasTensor([4, 2])
end_points = KerasTensor([4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (3, 20, 20))
def test_gaussian_blur(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
kernel_size = KerasTensor(
[
2,
]
)
sigma = KerasTensor(
[
2,
]
)
out = kimage.gaussian_blur(x, kernel_size, sigma)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
kernel_size = KerasTensor(
[
2,
]
)
sigma = KerasTensor(
[
2,
]
)
out = kimage.gaussian_blur(x, kernel_size, sigma)
self.assertEqual(out.shape, (3, 20, 20))
def test_elastic_transform(self):
# Test channels_last
x = KerasTensor([20, 20, 3])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([3, 20, 20])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (3, 20, 20))
def test_scale_and_translate(self):
images = KerasTensor([20, 20, 3])
output_shape = (25, 25, 3)
scale = KerasTensor([2])
translation = KerasTensor([2])
out = kimage.scale_and_translate(
images,
output_shape=output_shape,
scale=scale,
translation=translation,
spatial_dims=(0, 1),
method="linear",
)
self.assertEqual(out.shape, output_shape)
AFFINE_TRANSFORM_INTERPOLATIONS = { # map to order
"nearest": 0,
"bilinear": 1,
}
def _compute_affine_transform_coordinates(image, transform):
image = image.copy()
transform = transform.copy()
need_squeeze = False
if len(image.shape) == 3: # unbatched
need_squeeze = True
image = np.expand_dims(image, axis=0)
transform = np.expand_dims(transform, axis=0)
batch_size = image.shape[0]
# get indices
meshgrid = np.meshgrid(
*[np.arange(size) for size in image.shape[1:]], indexing="ij"
)
indices = np.concatenate(
[np.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
)
indices = np.tile(indices, (batch_size, 1, 1, 1, 1))
# swap the values
transform[:, 4], transform[:, 0] = (
transform[:, 0].copy(),
transform[:, 4].copy(),
)
transform[:, 5], transform[:, 2] = (
transform[:, 2].copy(),
transform[:, 5].copy(),
)
# deal with transform
transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1)
transform = np.reshape(transform, (batch_size, 3, 3))
offset = np.pad(transform[:, 0:2, 2], pad_width=[[0, 0], [0, 1]])
transform[:, 0:2, 2] = 0
# transform the indices
coordinates = np.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
coordinates = np.moveaxis(coordinates, source=-1, destination=1)
coordinates += np.reshape(offset, newshape=(*offset.shape, 1, 1, 1))
if need_squeeze:
coordinates = np.squeeze(coordinates, axis=0)
return coordinates
def _fixed_map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation
# outside the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
padding = [
(
max(-np.floor(c.min()).astype(int) + 1, 0),
max(np.ceil(c.max()).astype(int) + 1 - size, 0),
)
for c, size in zip(coordinates, input.shape)
]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
"nearest": "edge",
"mirror": "reflect",
"reflect": "symmetric",
}.get(fill_mode, fill_mode)
if fill_mode == "constant":
padded = np.pad(
input, padding, mode=pad_mode, constant_values=fill_value
)
else:
padded = np.pad(input, padding, mode=pad_mode)
result = scipy.ndimage.map_coordinates(
padded, shifted_coords, order=order, mode=fill_mode, cval=fill_value
)
return result
def _perspective_transform_numpy(
images,
start_points,
end_points,
interpolation="bilinear",
fill_value=0,
data_format=None,
):
data_format = backend.standardize_data_format(data_format)
need_squeeze = False
if len(images.shape) == 3:
images = np.expand_dims(images, axis=0)
need_squeeze = True
if len(start_points.shape) == 2:
start_points = np.expand_dims(start_points, axis=0)
if len(end_points.shape) == 2:
end_points = np.expand_dims(end_points, axis=0)
if data_format == "channels_first":
images = np.transpose(images, (0, 2, 3, 1))
batch_size, height, width, channels = images.shape
transforms = _compute_homography_matrix(start_points, end_points)
if len(transforms.shape) == 1:
transforms = np.expand_dims(transforms, axis=0)
if transforms.shape[0] == 1 and batch_size > 1:
transforms = np.tile(transforms, (batch_size, 1))
x, y = np.meshgrid(
np.arange(width, dtype=np.float32),
np.arange(height, dtype=np.float32),
indexing="xy",
)
output = np.empty((batch_size, height, width, channels))
for i in range(batch_size):
a0, a1, a2, a3, a4, a5, a6, a7 = transforms[i]
denom = a6 * x + a7 * y + 1.0
x_in = (a0 * x + a1 * y + a2) / denom
y_in = (a3 * x + a4 * y + a5) / denom
coords = np.stack([y_in.ravel(), x_in.ravel()], axis=0)
mapped_channels = []
for channel in range(channels):
channel_img = images[i, :, :, channel]
mapped_channel = _fixed_map_coordinates(
channel_img,
coords,
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode="constant",
fill_value=fill_value,
)
mapped_channels.append(mapped_channel.reshape(height, width))
output[i] = np.stack(mapped_channels, axis=-1)
if data_format == "channels_first":
output = np.transpose(output, (0, 3, 1, 2))
if need_squeeze:
output = np.squeeze(output, axis=0)
return output
def gaussian_blur_np(
images,
kernel_size,
sigma,
data_format=None,
):
def _create_gaussian_kernel(kernel_size, sigma, num_channels, dtype):
def _get_gaussian_kernel1d(size, sigma):
x = np.arange(size, dtype=dtype) - (size - 1) / 2
kernel1d = np.exp(-0.5 * (x / sigma) ** 2)
return kernel1d / np.sum(kernel1d)
def _get_gaussian_kernel2d(size, sigma):
kernel1d_x = _get_gaussian_kernel1d(size[0], sigma[0])
kernel1d_y = _get_gaussian_kernel1d(size[1], sigma[1])
return np.outer(kernel1d_y, kernel1d_x)
kernel = _get_gaussian_kernel2d(kernel_size, sigma)
kernel = kernel[:, :, np.newaxis]
kernel = np.tile(kernel, (1, 1, num_channels))
return kernel.astype(dtype)
images = np.asarray(images)
input_dtype = images.dtype
kernel_size = np.asarray(kernel_size)
if len(images.shape) not in (3, 4):
raise ValueError(
"Invalid images rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"images.shape={images.shape}"
)
need_squeeze = False
if len(images.shape) == 3:
images = np.expand_dims(images, axis=0)
need_squeeze = True
if data_format == "channels_first":
images = np.transpose(images, (0, 2, 3, 1))
num_channels = images.shape[-1]
kernel = _create_gaussian_kernel(
kernel_size, sigma, num_channels, input_dtype
)
batch_size, height, width, _ = images.shape
padded_images = np.pad(
images,
(
(0, 0),
(kernel_size[0] // 2, kernel_size[0] // 2),
(kernel_size[1] // 2, kernel_size[1] // 2),
(0, 0),
),
mode="constant",
)
blurred_images = np.zeros_like(images)
kernel_reshaped = kernel.reshape(
(1, kernel.shape[0], kernel.shape[1], num_channels)
)
for b in range(batch_size):
image_patch = padded_images[b : b + 1, :, :, :]
for i in range(height):
for j in range(width):
patch = image_patch[
:, i : i + kernel_size[0], j : j + kernel_size[1], :
]
blurred_images[b, i, j, :] = np.sum(
patch * kernel_reshaped, axis=(1, 2)
)
if data_format == "channels_first":
blurred_images = np.transpose(blurred_images, (0, 3, 1, 2))
if need_squeeze:
blurred_images = np.squeeze(blurred_images, axis=0)
return blurred_images
def elastic_transform_np(
images,
alpha=20.0,
sigma=5.0,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
seed=None,
data_format=None,
):
data_format = backend.standardize_data_format(data_format)
images = np.asarray(images)
input_dtype = images.dtype
alpha = np.asarray(alpha, dtype=input_dtype)
sigma = np.asarray(sigma, dtype=input_dtype)
kernel_size = (int(6 * sigma) | 1, int(6 * sigma) | 1)
need_squeeze = False
if len(images.shape) == 3:
images = np.expand_dims(images, axis=0)
need_squeeze = True
if data_format == "channels_last":
batch_size, height, width, channels = images.shape
channel_axis = -1
else:
batch_size, channels, height, width = images.shape
channel_axis = 1
rng = np.random.default_rng([seed, 0])
dx = (
rng.normal(size=(batch_size, height, width), loc=0.0, scale=1.0).astype(
input_dtype
)
* sigma
)
dy = (
rng.normal(size=(batch_size, height, width), loc=0.0, scale=1.0).astype(
input_dtype
)
* sigma
)
dx = gaussian_blur_np(
np.expand_dims(dx, axis=channel_axis),
kernel_size=kernel_size,
sigma=(sigma, sigma),
data_format=data_format,
)
dy = gaussian_blur_np(
np.expand_dims(dy, axis=channel_axis),
kernel_size=kernel_size,
sigma=(sigma, sigma),
data_format=data_format,
)
dx = np.squeeze(dx)
dy = np.squeeze(dy)
x, y = np.meshgrid(np.arange(width), np.arange(height))
x, y = x[None, :, :], y[None, :, :]
distorted_x = x + alpha * dx
distorted_y = y + alpha * dy
transformed_images = np.zeros_like(images)
if data_format == "channels_last":
for i in range(channels):
transformed_images[..., i] = np.stack(
[
_fixed_map_coordinates(
images[b, ..., i],
[distorted_y[b], distorted_x[b]],
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode=fill_mode,
fill_value=fill_value,
)
for b in range(batch_size)
]
)
else:
for i in range(channels):
transformed_images[:, i, :, :] = np.stack(
[
_fixed_map_coordinates(
images[b, i, ...],
[distorted_y[b], distorted_x[b]],
order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
fill_mode=fill_mode,
fill_value=fill_value,
)
for b in range(batch_size)
]
)
if need_squeeze:
transformed_images = np.squeeze(transformed_images, axis=0)
transformed_images = transformed_images.astype(input_dtype)
return transformed_images
def _compute_homography_matrix(start_points, end_points):
start_x1, start_y1 = start_points[:, 0, 0], start_points[:, 0, 1]
start_x2, start_y2 = start_points[:, 1, 0], start_points[:, 1, 1]
start_x3, start_y3 = start_points[:, 2, 0], start_points[:, 2, 1]
start_x4, start_y4 = start_points[:, 3, 0], start_points[:, 3, 1]
end_x1, end_y1 = end_points[:, 0, 0], end_points[:, 0, 1]
end_x2, end_y2 = end_points[:, 1, 0], end_points[:, 1, 1]
end_x3, end_y3 = end_points[:, 2, 0], end_points[:, 2, 1]
end_x4, end_y4 = end_points[:, 3, 0], end_points[:, 3, 1]
coefficient_matrix = np.stack(
[
np.stack(
[
end_x1,
end_y1,
np.ones_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
-start_x1 * end_x1,
-start_x1 * end_y1,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
end_x1,
end_y1,
np.ones_like(end_x1),
-start_y1 * end_x1,
-start_y1 * end_y1,
],
axis=-1,
),
np.stack(
[
end_x2,
end_y2,
np.ones_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
-start_x2 * end_x2,
-start_x2 * end_y2,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
end_x2,
end_y2,
np.ones_like(end_x2),
-start_y2 * end_x2,
-start_y2 * end_y2,
],
axis=-1,
),
np.stack(
[
end_x3,
end_y3,
np.ones_like(end_x3),
np.zeros_like(end_x3),
np.zeros_like(end_x3),
np.zeros_like(end_x3),
-start_x3 * end_x3,
-start_x3 * end_y3,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x3),
np.zeros_like(end_x3),
np.zeros_like(end_x3),
end_x3,
end_y3,
np.ones_like(end_x3),
-start_y3 * end_x3,
-start_y3 * end_y3,
],
axis=-1,
),
np.stack(
[
end_x4,
end_y4,
np.ones_like(end_x4),
np.zeros_like(end_x4),
np.zeros_like(end_x4),
np.zeros_like(end_x4),
-start_x4 * end_x4,
-start_x4 * end_y4,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x4),
np.zeros_like(end_x4),
np.zeros_like(end_x4),
end_x4,
end_y4,
np.ones_like(end_x4),
-start_y4 * end_x4,
-start_y4 * end_y4,
],
axis=-1,
),
],
axis=1,
)
target_vector = np.stack(
[
start_x1,
start_y1,
start_x2,
start_y2,
start_x3,
start_y3,
start_x4,
start_y4,
],
axis=-1,
)
target_vector = np.expand_dims(target_vector, axis=-1)
homography_matrix = np.linalg.solve(coefficient_matrix, target_vector)
homography_matrix = np.reshape(homography_matrix, [-1, 8])
return homography_matrix
| ImageOpsStaticShapeTest |
python | jamielennox__requests-mock | tests/test_custom_matchers.py | {
"start": 606,
"end": 863
} | class ____(object):
def __init___(self):
self.called = False
def __call__(self, request):
self.called = True
return None
def match_all(request):
return requests_mock.create_response(request, content=b'data')
| FailMatcher |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py | {
"start": 37777,
"end": 43421
} | class ____(AlloyDBWriteBaseOperator):
"""
Create a User in an Alloy DB cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AlloyDBCreateUserOperator`
:param user_id: Required. ID of the user to create.
:param user_configuration: Required. The user to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
:param cluster_id: Required. ID of the cluster for creating a user in.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_request: Optional. If set, performs request validation, but does not actually
execute the request.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"user_id", "user_configuration", "cluster_id"} | set(AlloyDBWriteBaseOperator.template_fields)
)
operator_extra_links = (AlloyDBUsersLink(),)
def __init__(
self,
user_id: str,
user_configuration: alloydb_v1.User | dict,
cluster_id: str,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.user_id = user_id
self.user_configuration = user_configuration
self.cluster_id = cluster_id
def _get_user(self) -> proto.Message | None:
self.log.info("Checking if the user %s exists already...", self.user_id)
try:
user = self.hook.get_user(
user_id=self.user_id,
cluster_id=self.cluster_id,
location=self.location,
project_id=self.project_id,
)
except NotFound:
self.log.info("The user %s does not exist yet.", self.user_id)
except Exception as ex:
raise AirflowException(ex) from ex
else:
self.log.info(
"AlloyDB user %s already exists in the cluster %s.",
self.user_id,
self.cluster_id,
)
result = alloydb_v1.User.to_dict(user)
return result
return None
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location_id": self.location,
"cluster_id": self.cluster_id,
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict | None:
AlloyDBUsersLink.persist(context=context)
if (_user := self._get_user()) is not None:
return _user
if self.validate_request:
self.log.info("Validating a Create AlloyDB user request.")
else:
self.log.info("Creating an AlloyDB user.")
try:
user = self.hook.create_user(
user_id=self.user_id,
cluster_id=self.cluster_id,
user=self.user_configuration,
location=self.location,
project_id=self.project_id,
request_id=self.request_id,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except Exception as ex:
raise AirflowException(ex)
else:
result = alloydb_v1.User.to_dict(user) if not self.validate_request else None
if not self.validate_request:
self.log.info("AlloyDB user %s was successfully created.", self.user_id)
return result
| AlloyDBCreateUserOperator |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_run_launcher.py | {
"start": 1194,
"end": 4075
} | class ____(BaseTestSuite):
def test_run_launcher(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "no_config_job")
result = execute_dagster_graphql(
context=graphql_context,
query=LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
assert result.data["launchPipelineExecution"]["run"]["status"] == "STARTING"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
wait_for_runs_to_finish(graphql_context.instance)
result = execute_dagster_graphql(
context=graphql_context, query=RUN_QUERY, variables={"runId": run_id}
)
assert result.data["pipelineRunOrError"]["__typename"] == "Run"
assert result.data["pipelineRunOrError"]["status"] == "SUCCESS"
def test_run_launcher_subset(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "more_complicated_config", ["noop_op"])
result = execute_dagster_graphql(
context=graphql_context,
query=LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
}
},
)
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
assert result.data["launchPipelineExecution"]["run"]["status"] == "STARTING"
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
wait_for_runs_to_finish(graphql_context.instance)
result = execute_dagster_graphql(
context=graphql_context, query=RUN_QUERY, variables={"runId": run_id}
)
assert result.data["pipelineRunOrError"]["__typename"] == "Run"
assert result.data["pipelineRunOrError"]["status"] == "SUCCESS"
assert result.data["pipelineRunOrError"]["stats"]["stepsSucceeded"] == 1
def test_run_launcher_unauthorized(self, graphql_context: WorkspaceRequestContext):
selector = infer_job_selector(graphql_context, "no_config_job")
with (
patch.object(graphql_context, "has_permission_for_selector", return_value=False),
patch.object(graphql_context, "was_permission_checked", return_value=True),
):
result = execute_dagster_graphql(
context=graphql_context,
query=LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
assert result.data["launchPipelineExecution"]["__typename"] == "UnauthorizedError"
| TestBasicLaunch |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/app_testing/tutorial001_py310/main.py | {
"start": 443,
"end": 2604
} | class ____(SQLModel):
name: str | None = None
secret_name: str | None = None
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(*, session: Session = Depends(get_session), hero: HeroCreate):
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(
*,
session: Session = Depends(get_session),
offset: int = 0,
limit: int = Query(default=100, le=100),
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(
*, session: Session = Depends(get_session), hero_id: int, hero: HeroUpdate
):
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(*, session: Session = Depends(get_session), hero_id: int):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
| HeroUpdate |
python | ApeWorX__ape | src/ape_test/_watch.py | {
"start": 1218,
"end": 2729
} | class ____(events.FileSystemEventHandler):
EVENTS_WATCHED = (
events.EVENT_TYPE_CREATED,
events.EVENT_TYPE_DELETED,
events.EVENT_TYPE_MODIFIED,
events.EVENT_TYPE_MOVED,
)
def dispatch(self, event: events.FileSystemEvent) -> None:
if event.event_type in self.EVENTS_WATCHED:
self.process_event(event)
@cached_property
def _extensions_to_watch(self) -> list[str]:
from ape.utils.basemodel import ManagerAccessMixin as access
return [".py", *access.compiler_manager.registered_compilers.keys()]
def _is_path_watched(self, filepath: str) -> bool:
"""
Check if file should trigger pytest run
"""
return any(map(filepath.endswith, self._extensions_to_watch))
def process_event(self, event: events.FileSystemEvent) -> None:
if self._is_path_watched(event.src_path):
emit_trigger()
def _run_ape_test(*pytest_args):
return run_subprocess(["ape", "test", *[f"{a}" for a in pytest_args]])
def _run_main_loop(delay: float, *pytest_args: str) -> None:
global trigger
now = datetime.now()
if trigger and now - trigger > timedelta(seconds=delay):
_run_ape_test(*pytest_args)
with trigger_lock:
trigger = None
time.sleep(delay)
def _create_event_handler():
# Abstracted for testing purposes.
return EventHandler()
def _create_observer():
# Abstracted for testing purposes.
return Observer()
| EventHandler |
python | pyca__cryptography | src/cryptography/x509/ocsp.py | {
"start": 662,
"end": 1172
} | class ____(utils.Enum):
SUCCESSFUL = 0
MALFORMED_REQUEST = 1
INTERNAL_ERROR = 2
TRY_LATER = 3
SIG_REQUIRED = 5
UNAUTHORIZED = 6
_ALLOWED_HASHES = (
hashes.SHA1,
hashes.SHA224,
hashes.SHA256,
hashes.SHA384,
hashes.SHA512,
)
def _verify_algorithm(algorithm: hashes.HashAlgorithm) -> None:
if not isinstance(algorithm, _ALLOWED_HASHES):
raise ValueError(
"Algorithm must be SHA1, SHA224, SHA256, SHA384, or SHA512"
)
| OCSPResponseStatus |
python | spack__spack | lib/spack/spack/util/prefix.py | {
"start": 244,
"end": 2238
} | class ____(str):
"""This class represents an installation prefix, but provides useful attributes for referring
to directories inside the prefix.
Attributes of this object are created on the fly when you request them, so any of the following
are valid:
>>> prefix = Prefix("/usr")
>>> prefix.bin
/usr/bin
>>> prefix.lib64
/usr/lib64
>>> prefix.share.man
/usr/share/man
>>> prefix.foo.bar.baz
/usr/foo/bar/baz
>>> prefix.join("dashed-directory").bin64
/usr/dashed-directory/bin64
Prefix objects behave identically to strings. In fact, they subclass :class:`str`, so operators
like ``+`` are legal::
print("foobar " + prefix)
This prints ``foobar /usr``. All of this is meant to make custom installs easy.
"""
def __getattr__(self, name: str) -> "Prefix":
"""Concatenate a string to a prefix.
Useful for strings that are valid variable names.
Args:
name: the string to append to the prefix
Returns:
the newly created installation prefix
"""
return Prefix(os.path.join(self, name))
def join(self, string: str) -> "Prefix": # type: ignore[override]
"""Concatenate a string to a prefix.
Useful for strings that are not valid variable names. This includes strings containing
characters like ``-`` and ``.``.
Args:
string: the string to append to the prefix
Returns:
the newly created installation prefix
"""
return Prefix(os.path.join(self, string))
def __getstate__(self) -> Dict[str, str]:
"""Control how object is pickled.
Returns:
current state of the object
"""
return self.__dict__
def __setstate__(self, state: Dict[str, str]) -> None:
"""Control how object is unpickled.
Args:
new state of the object
"""
self.__dict__.update(state)
| Prefix |
python | scipy__scipy | scipy/interpolate/tests/test_fitpack2.py | {
"start": 39395,
"end": 50425
} | class ____:
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def make_pair_grid(self, x, y):
"""
Create an array of (xi, yi) pairs for all xi in x and yi in y,
and reshape it to the desired shape.
Parameters
----------
x : array_like
1D array of x-values.
y : array_like
1D array of y-values.
dest_shape : tuple
Desired output shape.
Returns
-------
np.ndarray
Reshaped array of (x, y) pairs.
"""
return np.array([[xi, yi] for xi in x for yi in y])
def test_partial_derivative_method_grid(self):
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1],
[1, 2, 1, 2, 1],
[1, 2, 3, 2, 1],
[1, 2, 2, 2, 1],
[1, 2, 1, 2, 1]])
dx = array([[0, 0, -20, 0, 0],
[0, 0, 13, 0, 0],
[0, 0, 4, 0, 0],
[0, 0, -11, 0, 0],
[0, 0, 4, 0, 0]]) / 6.
dy = array([[4, -1, 0, 1, -4],
[4, -1, 0, 1, -4],
[0, 1.5, 0, -1.5, 0],
[2, .25, 0, -.25, -2],
[4, -1, 0, 1, -4]])
dxdy = array([[40, -25, 0, 25, -40],
[-26, 16.25, 0, -16.25, 26],
[-8, 5, 0, -5, 8],
[22, -13.75, 0, 13.75, -22],
[-8, 5, 0, -5, 8]]) / 6.
lut = RectBivariateSpline(x, y, z)
lut_ndbspline = convert_to_ndbspline(lut)
for orders, expected in [([1, 0], dx), ([0, 1], dy), ([1, 1], dxdy)]:
actual_rect = lut.partial_derivative(*orders)(x, y)
actual_ndb = lut_ndbspline.derivative(orders)(
self.make_pair_grid(x, y)
).reshape(expected.shape)
assert_array_almost_equal(actual_rect, expected)
assert_array_almost_equal(actual_ndb, expected)
def test_partial_derivative_method(self):
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1],
[1, 2, 1, 2, 1],
[1, 2, 3, 2, 1],
[1, 2, 2, 2, 1],
[1, 2, 1, 2, 1]])
expected = {
(1, 0): array([0, 0, 2./3, 0, 0]), # dx
(0, 1): array([4, -1, 0, -.25, -4]), # dy
(1, 1): array([160, 65, 0, 55, 32]) / 24. # dxdy
}
lut = RectBivariateSpline(x, y, z)
lut_ndbspline = convert_to_ndbspline(lut)
points = self.make_pair_grid(x, y) # shape: (25, 2)
# Evaluate only the diagonal points: (x[i], y[i])
diag_idx = np.arange(len(x))
diag_points = points[diag_idx * len(y) + diag_idx]
for orders, expected_vals in expected.items():
dx, dy = orders
# RectBivariateSpline result
actual_rbs = lut.partial_derivative(dx, dy)(x, y, grid=False)
assert_array_almost_equal(actual_rbs, expected_vals)
# NdBSpline result
actual_ndb = lut_ndbspline.derivative([dx, dy])(diag_points)
assert_array_almost_equal(actual_ndb, expected_vals)
def test_partial_derivative_order_too_large(self):
x = array([0, 1, 2, 3, 4], dtype=float)
y = x.copy()
z = ones((x.size, y.size))
lut = RectBivariateSpline(x, y, z)
lut_ndbspline = convert_to_ndbspline(lut)
with assert_raises(ValueError):
lut.partial_derivative(4, 1)
assert (lut_ndbspline.derivative([4, 1]).c == 0.0).all()
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xp_assert_close(lut(x, y), lut(x[:,None], y[None,:], grid=False))
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = array([6, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([2, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
RectBivariateSpline(x, y, z)
assert "y must be strictly increasing" in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1]])
RectBivariateSpline(x, y, z)
assert "x dimension of z must have same number of elements as x"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2], [1, 2, 1, 2], [1, 2, 3, 2],
[1, 2, 2, 2], [1, 2, 1, 2]])
RectBivariateSpline(x, y, z)
assert "y dimension of z must have same number of elements as y"\
in str(info.value)
with assert_raises(ValueError) as info:
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = (-100, 100, -100)
RectBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
RectBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
def test_array_like_input(self):
x = array([1, 2, 3, 4, 5])
y = array([1, 2, 3, 4, 5])
z = array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
bbox = array([1, 5, 1, 5])
spl1 = RectBivariateSpline(x, y, z, bbox=bbox)
spl2 = RectBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist())
assert_array_almost_equal(spl1(1.0, 1.0), spl2(1.0, 1.0))
def test_not_increasing_input(self):
# gh-8565
NSamp = 20
Theta = np.random.uniform(0, np.pi, NSamp)
Phi = np.random.uniform(0, 2 * np.pi, NSamp)
Data = np.ones(NSamp)
Interpolator = SmoothSphereBivariateSpline(Theta, Phi, Data, s=3.5)
NLon = 6
NLat = 3
GridPosLats = np.arange(NLat) / NLat * np.pi
GridPosLons = np.arange(NLon) / NLon * 2 * np.pi
# No error
Interpolator(GridPosLats, GridPosLons)
nonGridPosLats = GridPosLats.copy()
nonGridPosLats[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(nonGridPosLats, GridPosLons)
assert "x must be strictly increasing" in str(exc_info.value)
nonGridPosLons = GridPosLons.copy()
nonGridPosLons[2] = 0.001
with assert_raises(ValueError) as exc_info:
Interpolator(GridPosLats, nonGridPosLons)
assert "y must be strictly increasing" in str(exc_info.value)
def _sample_large_2d_data(self, nx, ny):
rng = np.random.default_rng(1)
x = np.arange(nx)
y = np.arange(ny)
z = rng.integers(0, 100, (nx, ny))
return x, y, z.astype(np.float64)
@pytest.mark.slow()
@pytest.mark.parametrize('shape', [(350, 850), (2000, 170)])
@pytest.mark.parametrize('s_tols', [(0, 1e-12, 1e-7),
(1, 7e-3, 1e-4),
(3, 2e-2, 1e-4)])
def test_spline_large_2d(self, shape, s_tols):
# Reference - https://github.com/scipy/scipy/issues/17787
nx, ny = shape
s, atol, rtol = s_tols
x, y, z = self._sample_large_2d_data(nx, ny)
spl = RectBivariateSpline(x, y, z, s=s)
z_spl = spl(x, y)
assert(not np.isnan(z_spl).any())
xp_assert_close(z_spl, z, atol=atol, rtol=rtol)
@pytest.mark.slow()
@pytest.mark.skipif(sys.maxsize <= 2**32, reason="Segfaults on 32-bit system "
"due to large input data")
def test_spline_large_2d_maxit(self):
# Reference - for https://github.com/scipy/scipy/issues/17787
nx, ny = 1000, 1700
s, atol, rtol = 2, 2e-2, 1e-12
x, y, z = self._sample_large_2d_data(nx, ny)
spl = RectBivariateSpline(x, y, z, s=s, maxit=25)
z_spl = spl(x, y)
assert(not np.isnan(z_spl).any())
xp_assert_close(z_spl, z, atol=atol, rtol=rtol)
| TestRectBivariateSpline |
python | getsentry__sentry | src/sentry/search/events/builder/discover.py | {
"start": 18678,
"end": 20832
} | class ____(DiscoverQueryBuilder):
base_function_acl = ["array_join", "histogram", "spans_histogram"]
def __init__(
self,
num_buckets: int,
histogram_column: str,
histogram_rows: int | None,
histogram_params: HistogramParams,
key_column: str | None,
field_names: list[str | Any | None] | None,
groupby_columns: list[str] | None,
*args: Any,
**kwargs: Any,
):
config = kwargs.get("config", QueryBuilderConfig())
functions_acl = config.functions_acl if config.functions_acl else []
config.functions_acl = functions_acl + self.base_function_acl
kwargs["config"] = config
super().__init__(*args, **kwargs)
self.additional_groupby = groupby_columns
selected_columns = kwargs["selected_columns"]
resolved_histogram = self.resolve_column(histogram_column)
# Reset&Ignore the columns from the QueryBuilder
self.aggregates: list[CurriedFunction] = []
self.columns = [self.resolve_column("count()"), resolved_histogram]
if key_column is not None and field_names is not None:
key_values: list[str] = [field for field in field_names if isinstance(field, str)]
self.where.append(Condition(self.resolve_column(key_column), Op.IN, key_values))
# make sure to bound the bins to get the desired range of results
min_bin = histogram_params.start_offset
self.where.append(Condition(resolved_histogram, Op.GTE, min_bin))
max_bin = histogram_params.start_offset + histogram_params.bucket_size * num_buckets
self.where.append(Condition(resolved_histogram, Op.LTE, max_bin))
if key_column is not None:
self.columns.append(self.resolve_column(key_column))
groups = len(selected_columns) if histogram_rows is None else histogram_rows
self.limit = Limit(groups * num_buckets)
self.orderby = (self.orderby if self.orderby else []) + [
OrderBy(resolved_histogram, Direction.ASC)
]
self.groupby = self.resolve_groupby(groupby_columns)
| HistogramQueryBuilder |
python | pallets__quart | examples/api/src/api/__init__.py | {
"start": 555,
"end": 791
} | class ____(TodoIn):
id: int
@app.post("/todos/")
@validate_request(TodoIn)
@validate_response(Todo)
async def create_todo(data: Todo) -> Todo:
return Todo(id=1, task=data.task, due=data.due)
def run() -> None:
app.run()
| Todo |
python | mlflow__mlflow | mlflow/models/resources.py | {
"start": 6262,
"end": 6936
} | class ____(DatabricksResource):
"""
Define a Databricks Genie Space to serve a model.
Args:
genie_space_id (str): The genie space id
on_behalf_of_user (Optional[bool]): If True, the resource is accessed with
with the permission of the invoker of the model in the serving endpoint. If set to
None or False, the resource is accessed with the permissions of the creator
"""
@property
def type(self) -> ResourceType:
return ResourceType.GENIE_SPACE
def __init__(self, genie_space_id: str, on_behalf_of_user: bool | None = None):
super().__init__(genie_space_id, on_behalf_of_user)
| DatabricksGenieSpace |
python | django__django | django/core/serializers/xml_serializer.py | {
"start": 18171,
"end": 18620
} | class ____(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super().__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
| ExternalReferenceForbidden |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/http_handler.py | {
"start": 2232,
"end": 4624
} | class ____(StorageHandler):
_scheme: str
_cache: ArtifactFileCache
_session: requests.Session
def __init__(self, session: requests.Session, scheme: str = "http") -> None:
self._scheme = scheme
self._cache = get_artifact_file_cache()
self._session = session
def can_handle(self, parsed_url: ParseResult) -> bool:
return parsed_url.scheme == self._scheme
def _get_stream(self, url: str) -> requests.Response:
"""Returns a streaming response from a GET request to the given URL."""
return self._session.get(
url,
stream=True,
cookies=_thread_local_api_settings.cookies,
headers=_thread_local_api_settings.headers,
)
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
if (ref_url := manifest_entry.ref) is None:
raise ValueError("Missing URL on artifact manifest entry")
if not local:
return ref_url
expected_digest = manifest_entry.digest
path, hit, cache_open = self._cache.check_etag_obj_path(
url=ref_url, etag=expected_digest, size=manifest_entry.size or 0
)
if hit:
return path
with self._get_stream(ref_url) as rsp:
entry_info = _HttpEntryInfo.from_response(rsp)
if (digest := entry_info.digest) != expected_digest:
raise ValueError(
f"Digest mismatch for url {ref_url!r}: expected {expected_digest!r} but found {digest!r}"
)
with cache_open(mode="wb") as file:
for data in rsp.iter_content(chunk_size=128 * 1024):
file.write(data)
return path
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: StrPath | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
name = name or os.path.basename(path)
if not checksum:
return [ArtifactManifestEntry(path=name, ref=path, digest=path)]
with self._get_stream(path) as rsp:
entry_info = _HttpEntryInfo.from_response(rsp)
return [ArtifactManifestEntry(path=name, **asdict(entry_info))]
| HTTPHandler |
python | django__django | tests/distinct_on_fields/models.py | {
"start": 1014,
"end": 1221
} | class ____(models.Model):
staff = models.ForeignKey(Staff, models.CASCADE)
tag = models.ForeignKey(Tag, models.CASCADE)
def __str__(self):
return "%s -> %s" % (self.tag, self.staff)
| StaffTag |
python | apache__airflow | providers/microsoft/mssql/src/airflow/providers/microsoft/mssql/hooks/mssql.py | {
"start": 1265,
"end": 5447
} | class ____(DbApiHook):
"""
Interact with Microsoft SQL Server.
:param args: passed to DBApiHook
:param sqlalchemy_scheme: Scheme sqlalchemy connection. Default is ``mssql+pymssql`` Only used for
``get_sqlalchemy_engine`` and ``get_sqlalchemy_connection`` methods.
:param kwargs: passed to DbApiHook
"""
conn_name_attr = "mssql_conn_id"
default_conn_name = "mssql_default"
conn_type = "mssql"
hook_name = "Microsoft SQL Server"
supports_autocommit = True
DEFAULT_SQLALCHEMY_SCHEME = "mssql+pymssql"
def __init__(
self,
*args,
sqlalchemy_scheme: str | None = None,
**kwargs,
) -> None:
super().__init__(*args, **{**kwargs, **{"escape_word_format": "[{}]"}})
self.schema = kwargs.pop("schema", None)
self._sqlalchemy_scheme = sqlalchemy_scheme
@property
def sqlalchemy_scheme(self) -> str:
"""Sqlalchemy scheme either from constructor, connection extras or default."""
extra_scheme = self.connection_extra_lower.get("sqlalchemy_scheme")
if not self._sqlalchemy_scheme and extra_scheme and (":" in extra_scheme or "/" in extra_scheme):
raise RuntimeError("sqlalchemy_scheme in connection extra should not contain : or / characters")
return self._sqlalchemy_scheme or extra_scheme or self.DEFAULT_SQLALCHEMY_SCHEME
@property
def dialect_name(self) -> str:
return "mssql"
@property
def dialect(self) -> Dialect:
return MsSqlDialect(self)
def get_uri(self) -> str:
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
r = list(urlsplit(super().get_uri()))
# change pymssql driver:
r[0] = self.sqlalchemy_scheme
# remove query string 'sqlalchemy_scheme' like parameters:
qs = parse_qs(r[3], keep_blank_values=True)
for k in list(qs.keys()):
if k.lower() == "sqlalchemy_scheme":
qs.pop(k, None)
r[3] = urlencode(qs, doseq=True)
return urlunsplit(r)
def get_sqlalchemy_connection(
self, connect_kwargs: dict | None = None, engine_kwargs: dict | None = None
) -> Any:
"""Sqlalchemy connection object."""
engine = self.get_sqlalchemy_engine(engine_kwargs=engine_kwargs)
return engine.connect(**(connect_kwargs or {}))
def get_conn(self) -> PymssqlConnection:
"""Return ``pymssql`` connection object."""
conn = self.connection
extra_conn_args = {key: val for key, val in conn.extra_dejson.items() if key != "sqlalchemy_scheme"}
return pymssql.connect(
server=conn.host or "",
user=conn.login,
password=conn.password,
database=self.schema or conn.schema or "",
port=str(conn.port),
**extra_conn_args,
)
def set_autocommit(
self,
conn: PymssqlConnection,
autocommit: bool,
) -> None:
conn.autocommit(autocommit)
def get_autocommit(self, conn: PymssqlConnection):
return conn.autocommit_state
def get_openlineage_database_info(self, connection) -> DatabaseInfo:
"""Return MSSQL specific information for OpenLineage."""
from airflow.providers.openlineage.sqlparser import DatabaseInfo
return DatabaseInfo(
scheme=self.get_openlineage_database_dialect(connection),
authority=DbApiHook.get_openlineage_authority_part(connection, default_port=1433),
information_schema_columns=[
"table_schema",
"table_name",
"column_name",
"ordinal_position",
"data_type",
"table_catalog",
],
database=self.schema or self.connection.schema,
is_information_schema_cross_db=True,
)
def get_openlineage_database_dialect(self, connection) -> str:
"""Return database dialect."""
return "mssql"
def get_openlineage_default_schema(self) -> str | None:
"""Return current schema."""
return self.get_first("SELECT SCHEMA_NAME();")[0]
| MsSqlHook |
python | PrefectHQ__prefect | src/integrations/prefect-dask/prefect_dask/task_runners.py | {
"start": 3532,
"end": 17501
} | class ____(TaskRunner):
"""
A parallel task_runner that submits tasks to the `dask.distributed` scheduler.
By default a temporary `distributed.LocalCluster` is created (and
subsequently torn down) within the `start()` contextmanager. To use a
different cluster class (e.g.
[`dask_kubernetes.KubeCluster`](https://kubernetes.dask.org/)), you can
specify `cluster_class`/`cluster_kwargs`.
Alternatively, if you already have a dask cluster running, you can provide
the cluster object via the `cluster` kwarg or the address of the scheduler
via the `address` kwarg.
!!! warning "Multiprocessing safety"
Note that, because the `DaskTaskRunner` uses multiprocessing, calls to flows
in scripts must be guarded with `if __name__ == "__main__":` or warnings will
be displayed.
Args:
cluster (distributed.deploy.Cluster, optional): Currently running dask cluster;
if one is not provider (or specified via `address` kwarg), a temporary
cluster will be created in `DaskTaskRunner.start()`. Defaults to `None`.
address (string, optional): Address of a currently running dask
scheduler. Defaults to `None`.
cluster_class (string or callable, optional): The cluster class to use
when creating a temporary dask cluster. Can be either the full
class name (e.g. `"distributed.LocalCluster"`), or the class itself.
cluster_kwargs (dict, optional): Additional kwargs to pass to the
`cluster_class` when creating a temporary dask cluster.
adapt_kwargs (dict, optional): Additional kwargs to pass to `cluster.adapt`
when creating a temporary dask cluster. Note that adaptive scaling
is only enabled if `adapt_kwargs` are provided.
client_kwargs (dict, optional): Additional kwargs to use when creating a
[`dask.distributed.Client`](https://distributed.dask.org/en/latest/api.html#client).
performance_report_path (str, optional): Path where the Dask performance report
will be saved. If not provided, no performance report will be generated.
Examples:
Using a temporary local dask cluster:
```python
from prefect import flow
from prefect_dask.task_runners import DaskTaskRunner
@flow(task_runner=DaskTaskRunner)
def my_flow():
...
```
Using a temporary cluster running elsewhere. Any Dask cluster class should
work, here we use [dask-cloudprovider](https://cloudprovider.dask.org):
```python
DaskTaskRunner(
cluster_class="dask_cloudprovider.FargateCluster",
cluster_kwargs={
"image": "prefecthq/prefect:latest",
"n_workers": 5,
},
)
```
Connecting to an existing dask cluster:
```python
DaskTaskRunner(address="192.0.2.255:8786")
```
"""
def __init__(
self,
cluster: Optional[distributed.deploy.cluster.Cluster] = None,
address: Optional[str] = None,
cluster_class: Union[
str, Callable[[], distributed.deploy.cluster.Cluster], None
] = None,
cluster_kwargs: Optional[dict[str, Any]] = None,
adapt_kwargs: Optional[dict[str, Any]] = None,
client_kwargs: Optional[dict[str, Any]] = None,
performance_report_path: Optional[str] = None,
):
# Validate settings and infer defaults
resolved_cluster_class: distributed.deploy.cluster.Cluster | None = None
if address:
if cluster or cluster_class or cluster_kwargs or adapt_kwargs:
raise ValueError(
"Cannot specify `address` and "
"`cluster`/`cluster_class`/`cluster_kwargs`/`adapt_kwargs`"
)
elif cluster:
if cluster_class or cluster_kwargs:
raise ValueError(
"Cannot specify `cluster` and `cluster_class`/`cluster_kwargs`"
)
else:
if isinstance(cluster_class, str):
resolved_cluster_class = from_qualified_name(cluster_class)
elif isinstance(cluster_class, Callable):
# Store the callable itself, don't instantiate here
resolved_cluster_class = cluster_class
else:
resolved_cluster_class = cluster_class
# Create a copies of incoming kwargs since we may mutate them
cluster_kwargs = cluster_kwargs.copy() if cluster_kwargs else {}
adapt_kwargs = adapt_kwargs.copy() if adapt_kwargs else {}
client_kwargs = client_kwargs.copy() if client_kwargs else {}
# Update kwargs defaults
client_kwargs.setdefault("set_as_default", False)
# The user cannot specify async/sync themselves
if "asynchronous" in client_kwargs:
raise ValueError(
"`client_kwargs` cannot set `asynchronous`. "
"This option is managed by Prefect."
)
if "asynchronous" in cluster_kwargs:
raise ValueError(
"`cluster_kwargs` cannot set `asynchronous`. "
"This option is managed by Prefect."
)
# Store settings
self.address: str | None = address
self.cluster_class: (
str | Callable[[], distributed.deploy.cluster.Cluster] | None
) = cluster_class
self.resolved_cluster_class: distributed.deploy.cluster.Cluster | None = (
resolved_cluster_class
)
self.cluster_kwargs: dict[str, Any] = cluster_kwargs
self.adapt_kwargs: dict[str, Any] = adapt_kwargs
self.client_kwargs: dict[str, Any] = client_kwargs
self.performance_report_path: str | None = performance_report_path
# Runtime attributes
self._client: PrefectDaskClient | None = None
self._cluster: distributed.deploy.cluster.Cluster | None = cluster
self._exit_stack = ExitStack()
super().__init__()
def __eq__(self, other: object) -> bool:
"""
Check if an instance has the same settings as this task runner.
"""
if isinstance(other, DaskTaskRunner):
return (
self.address == other.address
and self.cluster_class == other.cluster_class
and self.cluster_kwargs == other.cluster_kwargs
and self.adapt_kwargs == other.adapt_kwargs
and self.client_kwargs == other.client_kwargs
)
else:
return False
@property
def client(self) -> PrefectDaskClient:
"""
Get the Dask client for the task runner.
The client is created on first access. If a remote cluster is not
provided, the client will attempt to create/connect to a local cluster.
"""
if not self._client:
in_dask = False
try:
client = distributed.get_client()
if client.cluster is not None:
self._cluster = client.cluster
elif client.scheduler is not None:
self.address = client.scheduler.address
else:
raise RuntimeError("No global client found and no address provided")
in_dask = True
except ValueError:
pass
if self._cluster:
self.logger.info(f"Connecting to existing Dask cluster {self._cluster}")
self._connect_to = self._cluster
if self.adapt_kwargs:
self._cluster.adapt(**self.adapt_kwargs)
elif self.address:
self.logger.info(
f"Connecting to an existing Dask cluster at {self.address}"
)
self._connect_to = self.address
else:
# Determine the cluster class to use
if self.resolved_cluster_class:
# Use the resolved class if a string was provided
class_to_instantiate = self.resolved_cluster_class
elif callable(self.cluster_class):
# Use the provided class object if it's callable
class_to_instantiate = self.cluster_class
else:
# Default to LocalCluster only if no specific class was provided or resolved
class_to_instantiate = distributed.LocalCluster
cluster_name = to_qualified_name(class_to_instantiate)
self.logger.info(f"Creating a new Dask cluster with `{cluster_name}`")
try:
self._connect_to = self._cluster = self._exit_stack.enter_context(
class_to_instantiate(**self.cluster_kwargs)
)
except Exception:
self.logger.error(
f"Failed to create {cluster_name} cluster: ", exc_info=True
)
raise
if self.adapt_kwargs:
# self._cluster should be non-None here after instantiation
if self._cluster:
maybe_coro = self._cluster.adapt(**self.adapt_kwargs)
if asyncio.iscoroutine(maybe_coro):
run_coro_as_sync(maybe_coro)
else:
# This case should ideally not happen if instantiation succeeded
self.logger.warning(
"Cluster object not found after instantiation, cannot apply adapt_kwargs."
)
self._client = self._exit_stack.enter_context(
PrefectDaskClient(self._connect_to, **self.client_kwargs)
)
if self.performance_report_path:
# Register our client as current so that it's found by distributed.get_client()
self._exit_stack.enter_context(
distributed.Client.as_current(self._client)
)
self._exit_stack.enter_context(
distributed.performance_report(self.performance_report_path)
)
if self._client.dashboard_link and not in_dask:
self.logger.info(
f"The Dask dashboard is available at {self._client.dashboard_link}",
)
return self._client
def duplicate(self):
"""
Create a new instance of the task runner with the same settings.
"""
return type(self)(
address=self.address,
cluster_class=self.cluster_class,
cluster_kwargs=self.cluster_kwargs,
adapt_kwargs=self.adapt_kwargs,
client_kwargs=self.client_kwargs,
performance_report_path=self.performance_report_path,
)
@overload
def submit(
self,
task: "Task[P, Coroutine[Any, Any, R]]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectDaskFuture[R]] | None = None,
dependencies: dict[str, Set[RunInput]] | None = None,
) -> PrefectDaskFuture[R]: ...
@overload
def submit(
self,
task: "Task[Any, R]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectDaskFuture[R]] | None = None,
dependencies: dict[str, Set[RunInput]] | None = None,
) -> PrefectDaskFuture[R]: ...
def submit(
self,
task: "Union[Task[P, R], Task[P, Coroutine[Any, Any, R]]]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectDaskFuture[R]] | None = None,
dependencies: dict[str, Set[RunInput]] | None = None,
) -> PrefectDaskFuture[R]:
if not self._started:
raise RuntimeError(
"The task runner must be started before submitting work."
)
# Convert both parameters and wait_for futures to Dask futures
parameters = self._optimize_futures(parameters)
wait_for = self._optimize_futures(wait_for) if wait_for else None
future = self.client.submit(
task,
parameters=parameters,
wait_for=wait_for,
dependencies=dependencies,
return_type="state",
)
return PrefectDaskFuture[R](
wrapped_future=future, task_run_id=future.task_run_id
)
@overload
def map(
self,
task: "Task[P, Coroutine[Any, Any, R]]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
) -> PrefectFutureList[PrefectDaskFuture[R]]: ...
@overload
def map(
self,
task: "Task[P, R]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
) -> PrefectFutureList[PrefectDaskFuture[R]]: ...
def map(
self,
task: "Task[P, R]",
parameters: dict[str, Any],
wait_for: Iterable[PrefectFuture[Any]] | None = None,
):
return super().map(task, parameters, wait_for)
def _optimize_futures(self, expr: PrefectDaskFuture[Any] | Any) -> Any:
def visit_fn(expr: Any) -> Any:
if isinstance(expr, PrefectDaskFuture):
return expr.wrapped_future
# Fallback to return the expression unaltered
return expr
return visit_collection(expr, visit_fn=visit_fn, return_data=True)
def __enter__(self):
"""
Start the task runner and create an exit stack to manage shutdown.
"""
super().__enter__()
self._exit_stack.__enter__()
return self
def __exit__(self, *args: Any) -> None:
self._exit_stack.__exit__(*args)
super().__exit__(*args)
| DaskTaskRunner |
python | kamyu104__LeetCode-Solutions | Python/happy-number.py | {
"start": 70,
"end": 443
} | class ____(object):
# @param {integer} n
# @return {boolean}
def isHappy(self, n):
lookup = {}
while n != 1 and n not in lookup:
lookup[n] = True
n = self.nextNumber(n)
return n == 1
def nextNumber(self, n):
new = 0
for char in str(n):
new += int(char)**2
return new
| Solution |
python | ethereum__web3.py | web3/types.py | {
"start": 13885,
"end": 14043
} | class ____(TypedDict):
blockStateCalls: Sequence[BlockStateCallV1]
validation: NotRequired[bool]
traceTransfers: NotRequired[bool]
| SimulateV1Payload |
python | dask__distributed | distributed/comm/ucx.py | {
"start": 899,
"end": 1633
} | class ____(BaseListener):
prefix = UCXConnector.prefix
comm_class = UCXConnector.comm_class
encrypted = UCXConnector.encrypted
def __init__(
self,
address: str,
comm_handler: Callable[[UCX], Awaitable[None]] | None = None,
deserialize: bool = False,
allow_offload: bool = True,
**connection_args: Any,
):
_raise_deprecated()
@property
def port(self):
return self.ucp_server.port
async def start(self):
_raise_deprecated()
def stop(self):
_raise_deprecated()
@property
def listen_address(self):
_raise_deprecated()
@property
def contact_address(self):
_raise_deprecated()
| UCXListener |
python | apache__airflow | providers/vertica/tests/unit/vertica/hooks/test_vertica.py | {
"start": 4779,
"end": 7527
} | class ____:
def setup_method(self):
self.cur = mock.MagicMock(rowcount=0)
self.cur.nextset.side_effect = [None]
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestVerticaHook(VerticaHook):
conn_name_attr = "test_conn_id"
def get_conn(self):
return conn
self.db_hook = UnitTestVerticaHook()
@patch("airflow.providers.common.sql.hooks.sql.DbApiHook.insert_rows")
def test_insert_rows(self, mock_insert_rows):
table = "table"
rows = [("hello",), ("world",)]
target_fields = None
commit_every = 10
self.db_hook.insert_rows(table, rows, target_fields, commit_every)
mock_insert_rows.assert_called_once_with(table, rows, None, 10)
def test_get_first_record(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook.get_first(statement)
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_records(self):
statement = "SQL"
result_sets = [("row1",), ("row2",)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook.get_records(statement)
self.conn.close.assert_called_once_with()
self.cur.close.assert_called_once_with()
self.cur.execute.assert_called_once_with(statement)
def test_get_df_pandas(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook.get_df(statement, df_type="pandas")
assert column == df.columns[0]
assert result_sets[0][0] == df.values.tolist()[0][0]
assert result_sets[1][0] == df.values.tolist()[1][0]
def test_get_df_polars(self):
statement = "SQL"
column = "col"
result_sets = [("row1",), ("row2",)]
mock_execute = mock.MagicMock()
mock_execute.description = [(column, None, None, None, None, None, None)]
mock_execute.fetchall.return_value = result_sets
self.cur.execute.return_value = mock_execute
df = self.db_hook.get_df(statement, df_type="polars")
self.cur.execute.assert_called_once_with(statement)
mock_execute.fetchall.assert_called_once_with()
assert column == df.columns[0]
assert result_sets[0][0] == df.row(0)[0]
assert result_sets[1][0] == df.row(1)[0]
| TestVerticaHook |
python | sqlalchemy__sqlalchemy | examples/inheritance/joined.py | {
"start": 1399,
"end": 1894
} | class ____(Person):
__tablename__ = "engineer"
id: Mapped[intpk] = mapped_column(ForeignKey("person.id"))
status: Mapped[str50]
engineer_name: Mapped[str50]
primary_language: Mapped[str50]
__mapper_args__ = {"polymorphic_identity": "engineer"}
def __repr__(self):
return (
f"Engineer {self.name}, status {self.status}, "
f"engineer_name {self.engineer_name}, "
f"primary_language {self.primary_language}"
)
| Engineer |
python | pytorch__pytorch | .github/scripts/runner_determinator.py | {
"start": 3455,
"end": 3603
} | class ____(NamedTuple):
"""
Settings for the experiments that can be opted into.
"""
experiments: dict[str, Experiment] = {}
| Settings |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 18317,
"end": 19373
} | class ____(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = "mapper", "target", "uselist", "entity"
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
self.mapper = self.parent_property.mapper
self.entity = self.parent_property.entity
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
def _immediateload_create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
return self.parent_property._get_strategy(
(("lazy", "immediate"),)
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
@log.class_logger
@relationships.RelationshipProperty.strategy_for(do_nothing=True)
| _AbstractRelationshipLoader |
python | google__pytype | pytype/rewrite/flow/frame_base_test.py | {
"start": 722,
"end": 1115
} | class ____(frame_base.FrameBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.seen_opcodes = []
# pylint: disable=invalid-name
def byte_FAKE_OP(self, op):
self.seen_opcodes.append(('FAKE_OP', op.index))
def byte_FAKE_OP_NO_NEXT(self, op):
self.seen_opcodes.append(('FAKE_OP_NO_NEXT', op.index))
# pylint: enable=invalid-name
| TestFrame |
python | getsentry__sentry | tests/sentry/incidents/test_charts.py | {
"start": 5492,
"end": 10759
} | class ____(BaseMetricIssueTest):
@freeze_time(frozen_time)
@with_feature("organizations:incidents")
def test_get_incidents_from_detector(self) -> None:
self.create_detector() # dummy so detector ID != alert rule ID
detector = self.create_detector(project=self.project)
alert_rule = self.create_alert_rule(organization=self.organization, projects=[self.project])
self.create_alert_rule_detector(detector=detector, alert_rule_id=alert_rule.id)
incident = self.create_incident(
date_started=must_parse_datetime("2022-05-16T18:55:00Z"),
status=IncidentStatus.CRITICAL.value,
alert_rule=alert_rule,
)
# create incident activity the same way we do in logic.py create_incident
detected_activity = self.create_incident_activity(
incident,
IncidentActivityType.DETECTED.value,
date_added=incident.date_started,
)
created_activity = self.create_incident_activity(
incident,
IncidentActivityType.CREATED.value,
)
time_period = incident_date_range(60, incident.date_started, incident.date_closed)
chart_data = fetch_metric_issue_open_periods(self.organization, detector.id, time_period)
assert chart_data[0]["alertRule"]["id"] == str(alert_rule.id)
assert chart_data[0]["projects"] == [self.project.slug]
assert chart_data[0]["dateStarted"] == incident.date_started
assert len(chart_data[0]["activities"]) == 2
detected_activity_resp = chart_data[0]["activities"][0]
created_activity_resp = chart_data[0]["activities"][1]
assert detected_activity_resp["incidentIdentifier"] == str(incident.identifier)
assert detected_activity_resp["type"] == IncidentActivityType.DETECTED.value
assert detected_activity_resp["dateCreated"] == detected_activity.date_added
assert created_activity_resp["incidentIdentifier"] == str(incident.identifier)
assert created_activity_resp["type"] == IncidentActivityType.CREATED.value
assert created_activity_resp["dateCreated"] == created_activity.date_added
@freeze_time(frozen_time)
@with_feature("organizations:incidents")
@with_feature("organizations:new-metric-issue-charts")
@with_feature("organizations:workflow-engine-single-process-metric-issues")
def test_use_open_period_serializer(self) -> None:
detector = self.create_detector(project=self.project)
group = self.create_group(type=MetricIssue.type_id, priority=PriorityLevel.HIGH)
# Link detector to group
DetectorGroup.objects.create(detector=detector, group=group)
group_open_period = GroupOpenPeriod.objects.get(group=group)
opened_gopa = GroupOpenPeriodActivity.objects.create(
date_added=group_open_period.date_added,
group_open_period=group_open_period,
type=OpenPeriodActivityType.OPENED,
value=group.priority,
)
time_period = incident_date_range(
60, group_open_period.date_started, group_open_period.date_ended
)
chart_data = fetch_metric_issue_open_periods(self.organization, detector.id, time_period)
assert chart_data[0]["id"] == str(group_open_period.id)
assert chart_data[0]["start"] == group_open_period.date_started
activities = chart_data[0]["activities"]
assert activities[0]["id"] == str(opened_gopa.id)
assert activities[0]["type"] == OpenPeriodActivityType(opened_gopa.type).to_str()
assert activities[0]["value"] == PriorityLevel(group.priority).to_str()
@freeze_time(frozen_time)
@with_feature("organizations:incidents")
@with_feature("organizations:new-metric-issue-charts")
def test_use_open_period_serializer_with_offset(self) -> None:
group = self.create_group(type=MetricIssue.type_id, priority=PriorityLevel.HIGH)
# Link detector to group
DetectorGroup.objects.create(detector=self.detector, group=group)
group_open_period = GroupOpenPeriod.objects.get(group=group)
opened_gopa = GroupOpenPeriodActivity.objects.create(
date_added=group_open_period.date_added,
group_open_period=group_open_period,
type=OpenPeriodActivityType.OPENED,
value=group.priority,
)
time_period = incident_date_range(
60, group_open_period.date_started, group_open_period.date_ended
)
chart_data = fetch_metric_issue_open_periods(
self.organization,
self.detector.id,
time_period,
time_window=self.snuba_query.time_window,
)
assert chart_data[0]["id"] == str(group_open_period.id)
assert chart_data[0]["start"] == calculate_event_date_from_update_date(
group_open_period.date_started, self.snuba_query.time_window
)
activities = chart_data[0]["activities"]
assert activities[0]["id"] == str(opened_gopa.id)
assert activities[0]["type"] == OpenPeriodActivityType(opened_gopa.type).to_str()
assert activities[0]["value"] == PriorityLevel(group.priority).to_str()
| FetchOpenPeriodsTest |
python | getsentry__sentry | src/sentry/grouping/variants.py | {
"start": 5764,
"end": 6640
} | class ____(BaseVariant):
"""A user-defined custom fingerprint."""
type = "custom_fingerprint"
def __init__(self, fingerprint: list[str], fingerprint_info: FingerprintInfo):
self.values = fingerprint
self.fingerprint_info = fingerprint_info
self.is_built_in = fingerprint_info.get("matched_rule", {}).get("is_builtin", False)
@property
def description(self) -> str:
return "Sentry defined fingerprint" if self.is_built_in else "custom fingerprint"
@property
def key(self) -> str:
return "built_in_fingerprint" if self.is_built_in else "custom_fingerprint"
def get_hash(self) -> str | None:
return hash_from_values(self.values)
def _get_metadata_as_dict(self) -> FingerprintVariantMetadata:
return expose_fingerprint_dict(self.values, self.fingerprint_info)
| CustomFingerprintVariant |
python | facebook__pyre-check | tools/pysa_integration_tests/annotations.py | {
"start": 462,
"end": 766
} | class ____:
def __new__(
cls,
*,
code: int,
line: Optional[int] = None,
task: Optional[str] = None,
currently_found: bool = True,
) -> "ExpectIssue":
return super().__new__(cls)
def __call__(self, f: T) -> T:
return f
| ExpectIssue |
python | ansible__ansible | test/integration/targets/callback-dispatch/callback_plugins/oops_always_enabled.py | {
"start": 118,
"end": 656
} | class ____(CallbackBase):
call_count: t.ClassVar[int] = 0
def v2_runner_on_ok(self, *args, **kwargs) -> None:
print(f"hello from ALWAYS ENABLED v2_runner_on_ok {args=} {kwargs=}")
CallbackModule.call_count += 1
def v2_playbook_on_stats(self, stats):
print('hello from ALWAYS ENABLED v2_playbook_on_stats')
if os.environ.get('_ASSERT_OOPS'):
assert CallbackModule.call_count < 2, "always enabled callback should not "
print("no double callbacks test PASS")
| CallbackModule |
python | PyCQA__pylint | tests/functional/u/unsupported/unsupported_version_for_final.py | {
"start": 611,
"end": 794
} | class ____:
@myfinal # [using-final-decorator-in-unsupported-version]
def my_method(self):
pass
@typing.final # [using-final-decorator-in-unsupported-version]
| MyClass2 |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_v2_func_graphs.py | {
"start": 1618,
"end": 1784
} | class ____(ControlFlowFuncGraph):
"""FuncGraph for branches of tf.cond().
This is used to distinguish cond branches from other functions.
"""
| CondBranchFuncGraph |
python | doocs__leetcode | solution/0600-0699/0630.Course Schedule III/Solution.py | {
"start": 0,
"end": 335
} | class ____:
def scheduleCourse(self, courses: List[List[int]]) -> int:
courses.sort(key=lambda x: x[1])
pq = []
s = 0
for duration, last in courses:
heappush(pq, -duration)
s += duration
while s > last:
s += heappop(pq)
return len(pq)
| Solution |
python | huggingface__transformers | src/transformers/models/ovis2/modeling_ovis2.py | {
"start": 27849,
"end": 33673
} | class ____(Ovis2PreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: Ovis2Config):
super().__init__(config)
self.model = Ovis2Model(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(self, pixel_values: torch.FloatTensor):
return self.model.get_image_features(pixel_values=pixel_values)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, Ovis2CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Ovis2ForConditionalGeneration
>>> model = Ovis2ForConditionalGeneration.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> prompt = "<|im_start|>user\n<image>\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
>>> url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_new_tokens=15)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
"user\n\nDescribe the image.\nassistant\nThe image features a brown dog standing on a wooden floor, looking up with"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return Ovis2CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = ["Ovis2PreTrainedModel", "Ovis2Model", "Ovis2ForConditionalGeneration"]
| Ovis2ForConditionalGeneration |
python | kamyu104__LeetCode-Solutions | Python/apply-discount-every-n-orders.py | {
"start": 131,
"end": 899
} | class ____(object):
def __init__(self, n, discount, products, prices):
"""
:type n: int
:type discount: int
:type products: List[int]
:type prices: List[int]
"""
self.__n = n
self.__discount = discount
self.__curr = 0
self.__lookup = {p : prices[i] for i, p in enumerate(products)}
def getBill(self, product, amount):
"""
:type product: List[int]
:type amount: List[int]
:rtype: float
"""
self.__curr = (self.__curr+1) % self.__n
result = 0.0
for i, p in enumerate(product):
result += self.__lookup[p]*amount[i]
return result * (1.0 - self.__discount/100.0 if self.__curr == 0 else 1.0)
| Cashier |
python | walkccc__LeetCode | solutions/398. Random Pick Index/398.py | {
"start": 0,
"end": 298
} | class ____:
def __init__(self, nums: list[int]):
self.nums = nums
def pick(self, target: int) -> int:
ans = -1
rng = 0
for i, num in enumerate(self.nums):
if num == target:
rng += 1
if random.randint(0, rng - 1) == 0:
ans = i
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/design-a-number-container-system.py | {
"start": 140,
"end": 939
} | class ____(object):
def __init__(self):
self.__idx_to_num = {}
self.__num_to_idxs = collections.defaultdict(SortedList)
def change(self, index, number):
"""
:type index: int
:type number: int
:rtype: None
"""
if index in self.__idx_to_num:
self.__num_to_idxs[self.__idx_to_num[index]].remove(index)
if not self.__num_to_idxs[self.__idx_to_num[index]]:
del self.__num_to_idxs[self.__idx_to_num[index]]
self.__idx_to_num[index] = number
self.__num_to_idxs[number].add(index)
def find(self, number):
"""
:type number: int
:rtype: int
"""
return self.__num_to_idxs[number][0] if number in self.__num_to_idxs else -1
| NumberContainers |
python | ray-project__ray | python/ray/train/tests/lightning_test_utils.py | {
"start": 3015,
"end": 3677
} | class ____(pl.LightningDataModule):
def __init__(self, batch_size: int = 8, dataset_size: int = 256) -> None:
super().__init__()
self.batch_size = batch_size
self.train_data = torch.randn(dataset_size, 32)
self.val_data = torch.randn(dataset_size, 32)
self.test_data = torch.randn(dataset_size, 32)
def train_dataloader(self):
return DataLoader(self.train_data, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.val_data, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.test_data, batch_size=self.batch_size)
| DummyDataModule |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 733789,
"end": 734454
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("created_at", "key", "read_only", "title", "verified")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
key = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="key")
read_only = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="readOnly"
)
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
verified = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="verified")
| DeployKey |
python | mlflow__mlflow | dev/build.py | {
"start": 162,
"end": 3094
} | class ____:
# name of the package on PyPI.
pypi_name: str
# type of the package, one of "dev", "skinny", "tracing", "release"
type: str
# path to the package relative to the root of the repository
build_path: str
DEV = Package("mlflow", "dev", ".")
RELEASE = Package("mlflow", "release", ".")
SKINNY = Package("mlflow-skinny", "skinny", "libs/skinny")
TRACING = Package("mlflow-tracing", "tracing", "libs/tracing")
PACKAGES = [
DEV,
SKINNY,
RELEASE,
TRACING,
]
def parse_args():
parser = argparse.ArgumentParser(description="Build MLflow package.")
parser.add_argument(
"--package-type",
help="Package type to build. Default is 'dev'.",
choices=[p.type for p in PACKAGES],
default="dev",
)
parser.add_argument(
"--sha",
help="If specified, include the SHA in the wheel name as a build tag.",
)
return parser.parse_args()
@contextlib.contextmanager
def restore_changes():
try:
yield
finally:
subprocess.check_call(
[
"git",
"restore",
"README.md",
"pyproject.toml",
]
)
def main():
args = parse_args()
# Clean up build artifacts generated by previous builds
paths_to_clean_up = ["build"]
for pkg in PACKAGES:
paths_to_clean_up += [
f"{pkg.build_path}/dist",
f"{pkg.build_path}/{pkg.pypi_name}.egg_info",
]
for path in map(Path, paths_to_clean_up):
if not path.exists():
continue
if path.is_file():
path.unlink()
else:
shutil.rmtree(path)
package = next(p for p in PACKAGES if p.type == args.package_type)
with restore_changes():
pyproject = Path("pyproject.toml")
if package == RELEASE:
pyproject.write_text(Path("pyproject.release.toml").read_text())
subprocess.check_call(
[
sys.executable,
"-m",
"build",
package.build_path,
]
)
DIST_DIR = Path("dist")
DIST_DIR.mkdir(exist_ok=True)
if package in (SKINNY, TRACING):
# Move `libs/xyz/dist/*` to `dist/`
for src in (Path(package.build_path) / "dist").glob("*"):
print(src)
dst = DIST_DIR / src.name
if dst.exists():
dst.unlink()
src.rename(dst)
if args.sha:
# If build succeeds, there should be one wheel in the dist directory
wheel = next(DIST_DIR.glob("mlflow*.whl"))
name, version, rest = wheel.name.split("-", 2)
build_tag = f"0.sha.{args.sha}" # build tag must start with a digit
wheel.rename(wheel.with_name(f"{name}-{version}-{build_tag}-{rest}"))
if __name__ == "__main__":
main()
| Package |
python | allegroai__clearml | clearml/backend_interface/base.py | {
"start": 615,
"end": 6315
} | class ____(SessionInterface):
"""Base class for a backend manager class"""
_default_session = None
_num_retry_warning_display = 1
_offline_mode = ENV_OFFLINE_MODE.get()
_JSON_EXCEPTION = (
(jsonschema.ValidationError, requests.exceptions.InvalidJSONError)
if hasattr(requests.exceptions, "InvalidJSONError")
else (jsonschema.ValidationError,)
)
@property
def session(self) -> Session:
return self._session
@property
def log(self) -> logging.Logger:
return self._log
def __init__(self, session: Session = None, log: logging.Logger = None, **kwargs: Any) -> None:
super(InterfaceBase, self).__init__()
self._session = session or self._get_default_session()
self._log = log or self._create_log()
def _create_log(self) -> logging.Logger:
log = get_logger(str(self.__class__.__name__))
try:
log.setLevel(LOG_LEVEL_ENV_VAR.get(default=log.level))
except TypeError as ex:
raise ValueError("Invalid log level defined in environment variable `%s`: %s" % (LOG_LEVEL_ENV_VAR, ex))
return log
@classmethod
def _send(
cls,
session: Session,
req: BatchRequest,
ignore_errors: bool = False,
raise_on_errors: bool = True,
log: logging.Logger = None,
async_enable: bool = False,
) -> CallResult:
"""Convenience send() method providing a standardized error reporting"""
if cls._offline_mode:
return None
num_retries = 0
while True:
error_msg = ""
try:
res = session.send(req, async_enable=async_enable)
if res.meta.result_code in (200, 202) or ignore_errors:
return res
if isinstance(req, BatchRequest):
error_msg = "Action failed %s" % res.meta
else:
error_msg = "Action failed %s (%s)" % (
res.meta,
", ".join("%s=%s" % p for p in req.to_dict().items()),
)
if log:
log.error(error_msg)
except requests.exceptions.BaseHTTPError as e:
res = None
if log and num_retries >= cls._num_retry_warning_display:
log.warning("Retrying, previous request failed %s: %s" % (str(type(req)), str(e)))
except MaxRequestSizeError as e:
res = CallResult(meta=ResponseMeta.from_raw_data(status_code=400, text=str(e)))
error_msg = "Failed sending: %s" % str(e)
except requests.exceptions.ConnectionError as e:
# We couldn't send the request for more than the retries times configure in the api configuration file,
# so we will end the loop and raise the exception to the upper level.
# Notice: this is a connectivity error and not a backend error.
# if raise_on_errors:
# raise
res = None
if log and num_retries >= cls._num_retry_warning_display:
log.warning("Retrying, previous request failed %s: %s" % (str(type(req)), str(e)))
except cls._JSON_EXCEPTION as e:
if log:
log.error(
"Field %s contains illegal schema: %s",
".".join(e.path),
str(e.message),
)
if raise_on_errors:
raise ValidationError("Field %s contains illegal schema: %s" % (".".join(e.path), e.message))
# We do not want to retry
return None
except Exception as e:
import traceback
traceback.print_exc()
res = None
if log and num_retries >= cls._num_retry_warning_display:
log.warning("Retrying, previous request failed %s: %s" % (str(type(req)), str(e)))
if res and res.meta.result_code <= 500:
# Proper backend error/bad status code - raise or return
if raise_on_errors:
raise SendError(res, error_msg)
return res
num_retries += 1
def send(
self,
req: BatchRequest,
ignore_errors: bool = False,
raise_on_errors: bool = True,
async_enable: bool = False,
) -> CallResult:
return self._send(
session=self.session,
req=req,
ignore_errors=ignore_errors,
raise_on_errors=raise_on_errors,
log=self.log,
async_enable=async_enable,
)
@classmethod
def _get_default_session(cls) -> Session:
if not InterfaceBase._default_session:
InterfaceBase._default_session = Session(
initialize_logging=False,
config=config_obj,
api_key=ENV_ACCESS_KEY.get(),
secret_key=ENV_SECRET_KEY.get(),
)
return InterfaceBase._default_session
@classmethod
def _set_default_session(cls, session: Session) -> None:
"""
Set a new default session to the system
Warning: Use only for debug and testing
:param session: The new default session
"""
InterfaceBase._default_session = session
@property
def default_session(self) -> Session:
if hasattr(self, "_session"):
return self._session
return self._get_default_session()
@six.add_metaclass(abc.ABCMeta)
| InterfaceBase |
python | walkccc__LeetCode | solutions/1568. Minimum Number of Days to Disconnect Island/1568.py | {
"start": 0,
"end": 1132
} | class ____:
def minDays(self, grid: list[list[int]]) -> int:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
m = len(grid)
n = len(grid[0])
def dfs(grid: list[list[int]], i: int, j: int, seen: set[tuple[int, int]]):
seen.add((i, j))
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x == m or y < 0 or y == n:
continue
if grid[x][y] == 0 or (x, y) in seen:
continue
dfs(grid, x, y, seen)
def disconnected(grid: list[list[int]]) -> bool:
islandsCount = 0
seen = set()
for i in range(m):
for j in range(n):
if grid[i][j] == 0 or (i, j) in seen:
continue
if islandsCount > 1:
return True
islandsCount += 1
dfs(grid, i, j, seen)
return islandsCount != 1
if disconnected(grid):
return 0
# Try to remove 1 land.
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
grid[i][j] = 0
if disconnected(grid):
return 1
grid[i][j] = 1
# Remove 2 lands.
return 2
| Solution |
python | simplejson__simplejson | simplejson/tests/test_for_json.py | {
"start": 293,
"end": 372
} | class ____(dict):
def for_json(self):
return {'alpha': 1}
| DictForJson |
python | kamyu104__LeetCode-Solutions | Python/subtree-removal-game-with-fibonacci-tree.py | {
"start": 1072,
"end": 1465
} | class ____(object):
def findGameWinner(self, n):
"""
:type n: int
:rtype: bool
"""
grundy = [0, 1] # 0-indexed
for i in xrange(2, n):
grundy[i%2] = (grundy[(i-1)%2]+1)^(grundy[(i-2)%2]+1) # colon principle, replace the branches by a non-branching stalk of length equal to their nim sum
return grundy[(n-1)%2] > 0
| Solution2 |
python | celery__celery | celery/exceptions.py | {
"start": 7529,
"end": 7633
} | class ____(TaskError):
"""The task has invalid data or ain't properly constructed."""
| InvalidTaskError |
python | Netflix__metaflow | metaflow/_vendor/packaging/_parser.py | {
"start": 1148,
"end": 9399
} | class ____(NamedTuple):
name: str
url: str
extras: List[str]
specifier: str
marker: Optional[MarkerList]
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens("LEFT_BRACKET", "RIGHT_BRACKET"):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
    """Parse ``extras_list = identifier (wsp* ',' wsp* identifier)*``."""
    names: List[str] = []
    if not tokenizer.check("IDENTIFIER"):
        return names
    names.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            # Two identifiers back to back: the separating comma is missing.
            tokenizer.raise_syntax_error("Expected comma between extra names")
        if not tokenizer.check("COMMA"):
            break
        tokenizer.read()

        tokenizer.consume("WS")
        names.append(
            tokenizer.expect("IDENTIFIER", expected="extra name after comma").text
        )

    return names
def _parse_specifier(tokenizer: Tokenizer) -> str:
    """Parse a (possibly parenthesized) version-specifier group.

    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    """
    with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
        tokenizer.consume("WS")
        text = _parse_version_many(tokenizer)
        tokenizer.consume("WS")
    return text
def _parse_version_many(tokenizer: Tokenizer) -> str:
    """Parse ``version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?``.

    Returns the concatenated text of the specifier and comma tokens, with
    the surrounding whitespace dropped.
    """
    pieces: List[str] = []
    while tokenizer.check("SPECIFIER"):
        pieces.append(tokenizer.read().text)
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        pieces.append(tokenizer.read().text)  # keep the comma itself
        tokenizer.consume("WS")
    return "".join(pieces)
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
    """Parse *source* as a complete environment-marker expression."""
    tokenizer = Tokenizer(source, rules=DEFAULT_RULES)
    return _parse_marker(tokenizer)
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """Parse ``marker = marker_atom (BOOLOP marker_atom)+``.

    Returns a flat list alternating atoms with boolean-operator strings,
    e.g. ``[atom, "and", atom, "or", atom]``.
    """
    result = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        op_text = tokenizer.read().text
        result.append(op_text)
        result.append(_parse_marker_atom(tokenizer))
    return result
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """Parse one marker atom, optionally parenthesized.

    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    """
    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        # A parenthesized group nests a full marker expression.
        with tokenizer.enclosing_tokens("LEFT_PARENTHESIS", "RIGHT_PARENTHESIS"):
            tokenizer.consume("WS")
            atom: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        atom = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return atom
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """Parse ``marker_item = WS? marker_var WS? marker_op WS? marker_var WS?``.

    Returns the ``(left, op, right)`` comparison triple.
    """
    tokenizer.consume("WS")
    lhs = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    rhs = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (lhs, op, rhs)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """Parse ``marker_var = VARIABLE | QUOTED_STRING``."""
    if tokenizer.check("VARIABLE"):
        # Dotted variable spellings are normalized to underscores before
        # being mapped onto an environment variable.
        name = tokenizer.read().text.replace(".", "_")
        return process_env_var(name)
    if tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    tokenizer.raise_syntax_error(
        message="Expected a marker variable or quoted string"
    )
def process_env_var(env_var: str) -> Variable:
    """Normalize a marker variable name into a ``Variable``.

    ``python_implementation`` is a historical alias that is folded into the
    canonical ``platform_python_implementation`` name; every other name is
    wrapped unchanged.
    """
    # Membership test replaces the original chained `==`/`or` comparison.
    if env_var in ("platform_python_implementation", "python_implementation"):
        return Variable("platform_python_implementation")
    return Variable(env_var)
def process_python_str(python_str: str) -> Value:
    """Evaluate a quoted-string token and wrap its text in a ``Value``."""
    return Value(str(ast.literal_eval(python_str)))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """Parse ``marker_op = IN | NOT IN | OP``."""
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    if tokenizer.check("NOT"):
        # "not in" is two words: require whitespace, then the IN keyword.
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    if tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    return tokenizer.raise_syntax_error(
        "Expected marker operator, one of "
        "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
    )
| ParsedRequirement |
python | imageio__imageio | tests/test_core.py | {
"start": 849,
"end": 939
} | class ____:
def __init__(self, request):
"""Can read anything"""
| EpicDummyPlugin |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 52431,
"end": 58562
} | class ____(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.FloatTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, EntityClassificationOutput]:
r"""
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
used for the single-label classification. In this case, labels should contain the indices that should be in
`[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
and 1 indicate false and true, respectively.
Examples:
```python
>>> from transformers import AutoTokenizer, LukeForEntityClassification
>>> tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: person
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
feature_vector = outputs.entity_last_hidden_state[:, 0, :]
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary
# cross entropy is used otherwise.
# move labels to correct device
labels = labels.to(logits.device)
if labels.ndim == 1:
loss = nn.functional.cross_entropy(logits, labels)
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
return tuple(
v
for v in [loss, logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions]
if v is not None
)
return EntityClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity
tokens) for entity pair classification tasks, such as TACRED.
"""
)
| LukeForEntityClassification |
python | joke2k__faker | faker/providers/job/vi_VN/__init__.py | {
"start": 41,
"end": 1950
} | class ____(JobProvider):
"""Translated from Super class"""
jobs = (
# Information technology field
"Lập trình viên",
"Kỹ sư phần mềm",
"Kiến trúc sư phần mềm",
"Nhà phân tích dữ liệu",
"Chuyên viên bảo mật",
"Tester",
"DevOps Engineer",
"Project Manager",
"UX/UI Designer",
"Digital Marketer",
"Thực Tập",
# Finance - banking sector
"Nhân viên ngân hàng",
"Chuyên viên tín dụng",
"Kế toán",
"Kiểm toán",
"Nhà tư vấn tài chính",
"Chuyên viên phân tích thị trường",
# Business areas
"Giám đốc kinh doanh",
"Trưởng phòng kinh doanh",
"Nhân viên kinh doanh",
"Marketing Manager",
"Sales Representative",
"Chuyên viên bán hàng trực tuyến",
# Education Department
"Giáo viên",
"Giảng viên",
"Chuyên viên tư vấn tuyển sinh",
"Thực tập sinh giáo dục",
# Medical
"Bác sĩ",
"Y tá",
"Dược sĩ",
"Điều Dưỡng",
# Building sector
"Kỹ sư xây dựng",
"Kiến trúc sư",
"Thợ xây",
"Kỹ sư giám sát",
# Service sector
"Nhân viên khách sạn",
"Nhân viên nhà hàng",
"Tư vấn khách hàng",
"Nhân viên lễ tân",
# Manufacturing sector
"Công nhân sản xuất",
"Kỹ sư sản xuất",
"Quản lý sản xuất",
# Agriculture sector
"Nông dân",
"Kỹ sư nông nghiệp",
# Law field
"Luật sư",
"Thư ký pháp lý",
# Other areas
"Nhà báo",
"Biên dịch viên",
"Nghệ sĩ",
"Nhà thiết kế đồ họa",
"Nhân viên hành chính",
"Chuyên viên nhân sự",
"Nhân Viên Bán Hàng",
)
def job(self) -> str:
return self.random_element(self.jobs)
| Provider |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/react/types.py | {
"start": 867,
"end": 1254
} | class ____(BaseReasoningStep):
"""Observation reasoning step."""
observation: str
return_direct: bool = False
def get_content(self) -> str:
"""Get content."""
return f"Observation: {self.observation}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return self.return_direct
| ObservationReasoningStep |
python | psf__black | src/black/mode.py | {
"start": 673,
"end": 6727
} | class ____(Enum):
F_STRINGS = 2
NUMERIC_UNDERSCORES = 3
TRAILING_COMMA_IN_CALL = 4
TRAILING_COMMA_IN_DEF = 5
# The following two feature-flags are mutually exclusive, and exactly one should be
# set for every version of python.
ASYNC_IDENTIFIERS = 6
ASYNC_KEYWORDS = 7
ASSIGNMENT_EXPRESSIONS = 8
POS_ONLY_ARGUMENTS = 9
RELAXED_DECORATORS = 10
PATTERN_MATCHING = 11
UNPACKING_ON_FLOW = 12
ANN_ASSIGN_EXTENDED_RHS = 13
EXCEPT_STAR = 14
VARIADIC_GENERICS = 15
DEBUG_F_STRINGS = 16
PARENTHESIZED_CONTEXT_MANAGERS = 17
TYPE_PARAMS = 18
# FSTRING_PARSING = 19 # unused
TYPE_PARAM_DEFAULTS = 20
UNPARENTHESIZED_EXCEPT_TYPES = 21
T_STRINGS = 22
FORCE_OPTIONAL_PARENTHESES = 50
# __future__ flags
FUTURE_ANNOTATIONS = 51
FUTURE_FLAG_TO_FEATURE: Final = {
"annotations": Feature.FUTURE_ANNOTATIONS,
}
VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY36: {
Feature.F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_IDENTIFIERS,
},
TargetVersion.PY37: {
Feature.F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
},
TargetVersion.PY38: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
},
TargetVersion.PY39: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
},
TargetVersion.PY310: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
},
TargetVersion.PY311: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
},
TargetVersion.PY312: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
},
TargetVersion.PY313: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.TYPE_PARAM_DEFAULTS,
},
TargetVersion.PY314: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.TYPE_PARAM_DEFAULTS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
},
}
def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
if not target_versions:
raise ValueError("target_versions must not be empty")
return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
| Feature |
python | langchain-ai__langchain | libs/partners/prompty/langchain_prompty/core.py | {
"start": 247,
"end": 346
} | class ____(BaseModel, Generic[T]):
"""Simple model for a single item."""
item: T
| SimpleModel |
python | django__django | tests/admin_inlines/models.py | {
"start": 521,
"end": 887
} | class ____(models.Model):
name = models.CharField(max_length=50)
teacher = models.ForeignKey(Teacher, models.CASCADE)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey()
def __str__(self):
return "I am %s, a child of %s" % (self.name, self.parent)
| Child |
python | huggingface__transformers | src/transformers/models/apertus/modular_apertus.py | {
"start": 13245,
"end": 13288
} | class ____(LlamaModel):
pass
| ApertusModel |
python | joke2k__faker | faker/providers/internet/el_GR/__init__.py | {
"start": 108,
"end": 2284
} | class ____(InternetProvider):
free_email_domains = (
"hol.gr",
"gmail.com",
"hotmail.gr",
"yahoo.gr",
"googlemail.gr",
"otenet.gr",
"forthnet.gr",
)
tlds = ("com", "com", "com", "net", "org", "gr", "gr", "gr")
@slugify_domain
def user_name(self) -> str:
pattern: str = self.random_element(self.user_name_formats)
return latinize(self.bothify(self.generator.parse(pattern)))
@slugify_domain
def domain_word(self) -> str:
company = self.generator.format("company")
company_elements = company.split(" ")
company = latinize(company_elements.pop(0))
return company
# ``slugify`` doesn't replace greek glyphs.
def remove_accents(value: str) -> str:
"""
Remove accents from characters in the given string.
"""
search = "ΆΈΉΊΌΎΏάέήίόύώΪϊΐϋΰ"
replace = "ΑΕΗΙΟΥΩαεηιουωΙιιυυ"
def replace_accented_character(match):
matched = match.group(0)
if matched in search:
return replace[search.find(matched)]
return matched
return re.sub(rf"[{search}]+", replace_accented_character, value)
def latinize(value: str) -> str:
"""
Converts (transliterates) greek letters to latin equivalents.
"""
def replace_double_character(match):
search = ("Θ Χ Ψ " "θ χ ψ " "ΟΥ ΑΥ ΕΥ " "Ου Αυ Ευ " "ου αυ ευ").split()
replace = ("TH CH PS " "th ch ps " "OU AU EU " "Ou Au Eu " "ou au eu").split()
matched = match.group(0)
if matched in search:
return replace[search.index(matched)]
return matched
search = "ΑΒΓΔΕΖΗΙΚΛΜΝΞΟΠΡΣΣΤΥΦΩαβγδεζηικλμνξοπρσςτυφω"
replace = "AVGDEZIIKLMNXOPRSSTUFOavgdeziiklmnxoprsstyfo"
def replace_greek_character(match):
matched = list(match.group(0))
value = (replace[search.find(char)] for char in matched)
return "".join(value)
return re.sub(
rf"[{search}]+",
replace_greek_character,
re.sub(
r"([ΘΧΨθχψ]+|ΟΥ|ΑΥ|ΕΥ|Ου|Αυ|Ευ|ου|αυ|ευ)",
replace_double_character,
remove_accents(value),
),
)
| Provider |
python | mahmoud__glom | glom/matching.py | {
"start": 28455,
"end": 35174
} | class ____:
"""Check objects are used to make assertions about the target data,
and either pass through the data or raise exceptions if there is a
problem.
If any check condition fails, a :class:`~glom.CheckError` is raised.
Args:
spec: a sub-spec to extract the data to which other assertions will
be checked (defaults to applying checks to the target itself)
type: a type or sequence of types to be checked for exact match
equal_to: a value to be checked for equality match ("==")
validate: a callable or list of callables, each representing a
check condition. If one or more return False or raise an
exception, the Check will fail.
instance_of: a type or sequence of types to be checked with isinstance()
one_of: an iterable of values, any of which can match the target ("in")
default: an optional default value to replace the value when the check fails
(if default is not specified, GlomCheckError will be raised)
Aside from *spec*, all arguments are keyword arguments. Each
argument, except for *default*, represent a check
condition. Multiple checks can be passed, and if all check
conditions are left unset, Check defaults to performing a basic
truthy check on the value.
"""
# TODO: the next level of Check would be to play with the Scope to
# allow checking to continue across the same level of
# dictionary. Basically, collect as many errors as possible before
# raising the unified CheckError.
def __init__(self, spec=T, **kwargs):
self.spec = spec
self._orig_kwargs = dict(kwargs)
self.default = kwargs.pop('default', RAISE)
def _get_arg_val(name, cond, func, val, can_be_empty=True):
if val is _MISSING:
return ()
if not is_iterable(val):
val = (val,)
elif not val and not can_be_empty:
raise ValueError('expected %r argument to contain at least one value,'
' not: %r' % (name, val))
for v in val:
if not func(v):
raise ValueError('expected %r argument to be %s, not: %r'
% (name, cond, v))
return val
# if there are other common validation functions, maybe a
# small set of special strings would work as valid arguments
# to validate, too.
def truthy(val):
return bool(val)
validate = kwargs.pop('validate', _MISSING if kwargs else truthy)
type_arg = kwargs.pop('type', _MISSING)
instance_of = kwargs.pop('instance_of', _MISSING)
equal_to = kwargs.pop('equal_to', _MISSING)
one_of = kwargs.pop('one_of', _MISSING)
if kwargs:
raise TypeError('unexpected keyword arguments: %r' % kwargs.keys())
self.validators = _get_arg_val('validate', 'callable', callable, validate)
self.instance_of = _get_arg_val('instance_of', 'a type',
lambda x: isinstance(x, type), instance_of, False)
self.types = _get_arg_val('type', 'a type',
lambda x: isinstance(x, type), type_arg, False)
if equal_to is not _MISSING:
self.vals = (equal_to,)
if one_of is not _MISSING:
raise TypeError('expected "one_of" argument to be unset when'
' "equal_to" argument is passed')
elif one_of is not _MISSING:
if not is_iterable(one_of):
raise ValueError('expected "one_of" argument to be iterable'
' , not: %r' % one_of)
if not one_of:
raise ValueError('expected "one_of" to contain at least'
' one value, not: %r' % (one_of,))
self.vals = one_of
else:
self.vals = ()
return
class _ValidationError(Exception):
"for internal use inside of Check only"
pass
def glomit(self, target, scope):
ret = target
errs = []
if self.spec is not T:
target = scope[glom](target, self.spec, scope)
if self.types and type(target) not in self.types:
if self.default is not RAISE:
return arg_val(target, self.default, scope)
errs.append('expected type to be %r, found type %r' %
(self.types[0].__name__ if len(self.types) == 1
else tuple([t.__name__ for t in self.types]),
type(target).__name__))
if self.vals and target not in self.vals:
if self.default is not RAISE:
return arg_val(target, self.default, scope)
if len(self.vals) == 1:
errs.append(f"expected {self.vals[0]}, found {target}")
else:
errs.append(f'expected one of {self.vals}, found {target}')
if self.validators:
for i, validator in enumerate(self.validators):
try:
res = validator(target)
if res is False:
raise self._ValidationError
except Exception as e:
msg = ('expected %r check to validate target'
% getattr(validator, '__name__', None) or ('#%s' % i))
if type(e) is self._ValidationError:
if self.default is not RAISE:
return self.default
else:
msg += ' (got exception: %r)' % e
errs.append(msg)
if self.instance_of and not isinstance(target, self.instance_of):
# TODO: can these early returns be done without so much copy-paste?
# (early return to avoid potentially expensive or even error-causeing
# string formats)
if self.default is not RAISE:
return arg_val(target, self.default, scope)
errs.append('expected instance of %r, found instance of %r' %
(self.instance_of[0].__name__ if len(self.instance_of) == 1
else tuple([t.__name__ for t in self.instance_of]),
type(target).__name__))
if errs:
raise CheckError(errs, self, scope[Path])
return ret
def __repr__(self):
cn = self.__class__.__name__
posargs = (self.spec,) if self.spec is not T else ()
return format_invocation(cn, posargs, self._orig_kwargs, repr=bbrepr)
| Check |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 674929,
"end": 675671
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for IssueComment."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("IssueCommentEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("IssueComment"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| IssueCommentConnection |
python | plotly__plotly.py | plotly/graph_objs/isosurface/caps/_y.py | {
"start": 233,
"end": 4043
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface.caps"
_path_str = "isosurface.caps.y"
_valid_props = {"fill", "show"}
@property
def fill(self):
"""
Sets the fill ratio of the `caps`. The default fill value of
the `caps` is 1 meaning that they are entirely shaded. On the
other hand Applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def show(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the y `slices` is 1 meaning that they are entirely shaded. On
the other hand Applying a `fill` ratio less than one would
allow the creation of openings parallel to the edges.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the y `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
"""
def __init__(self, arg=None, fill=None, show=None, **kwargs):
"""
Construct a new Y object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.caps.Y`
fill
Sets the fill ratio of the `caps`. The default fill
value of the `caps` is 1 meaning that they are entirely
shaded. On the other hand Applying a `fill` ratio less
than one would allow the creation of openings parallel
to the edges.
show
Sets the fill ratio of the `slices`. The default fill
value of the y `slices` is 1 meaning that they are
entirely shaded. On the other hand Applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
Returns
-------
Y
"""
super().__init__("y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.caps.Y
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.caps.Y`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fill", arg, fill)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Y |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 255563,
"end": 257097
} | class ____(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def setUpModule():
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| SendRecvFdsTests |
python | pandas-dev__pandas | asv_bench/benchmarks/boolean.py | {
"start": 42,
"end": 739
} | class ____:
def setup(self):
N = 10_000
left, right, lmask, rmask = np.random.randint(0, 2, size=(4, N)).astype("bool")
self.left = pd.arrays.BooleanArray(left, lmask)
self.right = pd.arrays.BooleanArray(right, rmask)
def time_or_scalar(self):
self.left | True
self.left | False
def time_or_array(self):
self.left | self.right
def time_and_scalar(self):
self.left & True
self.left & False
def time_and_array(self):
self.left & self.right
def time_xor_scalar(self):
self.left ^ True
self.left ^ False
def time_xor_array(self):
self.left ^ self.right
| TimeLogicalOps |
python | kamyu104__LeetCode-Solutions | Python/best-time-to-buy-and-sell-stock-iii.py | {
"start": 1144,
"end": 2193
} | class ____(object):
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
min_price, max_profit_from_left, max_profits_from_left = \
float("inf"), 0, []
for price in prices:
min_price = min(min_price, price)
max_profit_from_left = max(max_profit_from_left, price - min_price)
max_profits_from_left.append(max_profit_from_left)
max_price, max_profit_from_right, max_profits_from_right = 0, 0, []
for i in reversed(range(len(prices))):
max_price = max(max_price, prices[i])
max_profit_from_right = max(max_profit_from_right,
max_price - prices[i])
max_profits_from_right.insert(0, max_profit_from_right)
max_profit = 0
for i in range(len(prices)):
max_profit = max(max_profit,
max_profits_from_left[i] +
max_profits_from_right[i])
return max_profit
| Solution3 |
python | getsentry__sentry | tests/sentry/users/api/bases/test_user.py | {
"start": 544,
"end": 3767
} | class ____(DRFPermissionTestCase):
user_permission = UserPermission()
def setUp(self) -> None:
super().setUp()
self.normal_user = self.create_user()
def test_allows_none_user_as_anonymous(self) -> None:
assert self.user_permission.has_object_permission(self.make_request(), APIView(), None)
def test_allows_current_user(self) -> None:
assert self.user_permission.has_object_permission(
self.make_request(self.normal_user), APIView(), self.normal_user
)
@override_settings(SUPERUSER_ORG_ID=1000)
def test_allows_active_superuser(self) -> None:
# The user passed in and the user on the request must be different to
# check superuser.
self.create_organization(owner=self.superuser, id=1000)
assert self.user_permission.has_object_permission(
self.superuser_request, APIView(), self.normal_user
)
with self.settings(SENTRY_SELF_HOSTED=False):
assert self.user_permission.has_object_permission(
self.superuser_request, APIView(), self.normal_user
)
@override_settings(SENTRY_SELF_HOSTED=False, SUPERUSER_ORG_ID=1000)
@override_options({"superuser.read-write.ga-rollout": True})
def test_active_superuser_read(self) -> None:
# superuser read can hit GET
request = self.make_request(user=self.superuser, is_superuser=True, method="GET")
self.create_organization(owner=self.superuser, id=1000)
assert self.user_permission.has_object_permission(request, APIView(), self.normal_user)
# superuser read cannot hit POST
request.method = "POST"
assert not self.user_permission.has_object_permission(request, APIView(), self.normal_user)
@override_settings(SENTRY_SELF_HOSTED=False, SUPERUSER_ORG_ID=1000)
@override_options({"superuser.read-write.ga-rollout": True})
def test_active_superuser_write(self) -> None:
# superuser write can hit GET
self.add_user_permission(self.superuser, "superuser.write")
self.create_organization(owner=self.superuser, id=1000)
request = self.make_request(user=self.superuser, is_superuser=True, method="GET")
assert self.user_permission.has_object_permission(request, APIView(), self.normal_user)
# superuser write can hit POST
request.method = "POST"
assert self.user_permission.has_object_permission(request, APIView(), self.normal_user)
def test_rejects_active_staff(self) -> None:
# The user passed in and the user on the request must be different to
# check staff.
assert not self.user_permission.has_object_permission(
self.staff_request, APIView(), self.normal_user
)
def test_rejects_user_as_anonymous(self) -> None:
assert not self.user_permission.has_object_permission(
self.make_request(), APIView(), self.normal_user
)
def test_rejects_other_user(self) -> None:
other_user = self.create_user()
assert not self.user_permission.has_object_permission(
self.make_request(self.staff_user), APIView(), other_user
)
@all_silo_test
| UserPermissionTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 60069,
"end": 60369
} | class ____(AliasedReturnsRows):
element: FromClause
@util.ro_non_memoized_property
def description(self) -> str:
name = self.name
if isinstance(name, _anonymous_label):
return f"Anonymous alias of {self.element.description}"
return name
| FromClauseAlias |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/exc.py | {
"start": 4209,
"end": 5520
} | class ____(sa_exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`_query.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
"""
@util.preload_module("sqlalchemy.orm.base")
def __init__(self, state: InstanceState[Any], msg: Optional[str] = None):
base = util.preloaded.orm_base
if not msg:
msg = (
"Instance '%s' has been deleted, or its "
"row is otherwise not present." % base.state_str(state)
)
sa_exc.InvalidRequestError.__init__(self, msg)
def __reduce__(self) -> Any:
return self.__class__, (None, self.args[0])
| ObjectDeletedError |
python | pallets__werkzeug | src/werkzeug/local.py | {
"start": 11366,
"end": 12345
} | class ____(_ProxyLookup):
"""Look up an augmented assignment method on a proxied object. The
method is wrapped to return the proxy instead of the object.
"""
__slots__ = ()
def __init__(
self,
f: t.Callable[..., t.Any] | None = None,
fallback: t.Callable[[LocalProxy[t.Any]], t.Any] | None = None,
) -> None:
super().__init__(f, fallback)
def bind_f(instance: LocalProxy[t.Any], obj: t.Any) -> t.Callable[..., t.Any]:
def i_op(self: t.Any, other: t.Any) -> LocalProxy[t.Any]:
f(self, other) # type: ignore
return instance
return i_op.__get__(obj, type(obj)) # type: ignore
self.bind_f = bind_f
def _l_to_r_op(op: F) -> F:
"""Swap the argument order to turn an l-op into an r-op."""
def r_op(obj: t.Any, other: t.Any) -> t.Any:
return op(other, obj)
return t.cast(F, r_op)
def _identity(o: T) -> T:
return o
| _ProxyIOp |
python | django__django | django/contrib/contenttypes/fields.py | {
"start": 11806,
"end": 12342
} | class ____(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
super().__init__(
field,
to,
related_name=related_query_name or "+",
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
| GenericRel |
python | numba__numba | numba/core/types/misc.py | {
"start": 10263,
"end": 11977
} | class ____(Callable, Opaque):
"""
The type of the jitted class (not instance). When the type of a class
is called, its constructor is invoked.
"""
mutable = True
name_prefix = "jitclass"
instance_type_class = ClassInstanceType
def __init__(self, class_def, ctor_template_cls, struct, jit_methods,
jit_props, jit_static_methods):
self.class_name = class_def.__name__
self.class_doc = class_def.__doc__
self._ctor_template_class = ctor_template_cls
self.jit_methods = jit_methods
self.jit_props = jit_props
self.jit_static_methods = jit_static_methods
self.struct = struct
fielddesc = ','.join("{0}:{1}".format(k, v) for k, v in struct.items())
name = "{0}.{1}#{2:x}<{3}>".format(self.name_prefix, self.class_name,
id(self), fielddesc)
super(ClassType, self).__init__(name)
def get_call_type(self, context, args, kws):
return self.ctor_template(context).apply(args, kws)
def get_call_signatures(self):
return (), True
def get_impl_key(self, sig):
return type(self)
@property
def methods(self):
return {k: v.py_func for k, v in self.jit_methods.items()}
@property
def static_methods(self):
return {k: v.py_func for k, v in self.jit_static_methods.items()}
@property
def instance_type(self):
return ClassInstanceType(self)
@property
def ctor_template(self):
return self._specialize_template(self._ctor_template_class)
def _specialize_template(self, basecls):
return type(basecls.__name__, (basecls,), dict(key=self))
| ClassType |
python | django__django | tests/db_functions/tests.py | {
"start": 251,
"end": 303
} | class ____(Upper):
bilateral = True
| UpperBilateral |
python | huggingface__transformers | tests/models/whisper/test_modeling_whisper.py | {
"start": 6279,
"end": 13653
} | class ____:
def __init__(
self,
parent,
batch_size=3, # need batch_size != num_hidden_layers
seq_length=60,
is_training=True,
use_labels=False,
vocab_size=200,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
input_channels=1,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
max_source_positions=30,
max_target_positions=40,
bos_token_id=98,
eos_token_id=98,
pad_token_id=0,
num_mel_bins=80,
decoder_start_token_id=85,
num_conv_layers=1,
suppress_tokens=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.input_channels = input_channels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_mel_bins = num_mel_bins
self.max_position_embeddings = max_position_embeddings
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.decoder_start_token_id = decoder_start_token_id
self.num_conv_layers = num_conv_layers
self.suppress_tokens = suppress_tokens
def prepare_config_and_inputs(self):
input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size)
decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device)
config = self.get_config()
inputs_dict = prepare_whisper_inputs_dict(
config,
attention_mask=None,
input_features=input_features,
decoder_input_ids=decoder_input_ids,
)
return config, inputs_dict
def get_config(self):
return WhisperConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
input_channels=self.input_channels,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
max_source_positions=self.max_source_positions,
max_target_positions=self.max_target_positions,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
decoder_ffn_dim=self.hidden_size,
encoder_ffn_dim=self.hidden_size,
decoder_start_token_id=self.decoder_start_token_id,
suppress_tokens=self.suppress_tokens,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def get_subsampled_output_lengths(self, input_lengths):
"""
Computes the output length of the convolutional layers
"""
for i in range(self.num_conv_layers):
input_lengths = (input_lengths - 1) // 2 + 1
return input_lengths
def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False):
model = WhisperModel(config=config).to(torch_device).eval()
if freeze_encoder:
model.freeze_encoder()
input_features = inputs_dict["input_features"]
decoder_input_ids = inputs_dict["decoder_input_ids"]
# first forward pass
last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16))
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = WhisperModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["decoder_input_ids"]
attention_mask = inputs_dict["decoder_attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = WhisperModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| WhisperModelTester |
python | pytest-dev__pytest-asyncio | docs/how-to-guides/multiple_loops_example.py | {
"start": 184,
"end": 638
} | class ____(DefaultEventLoopPolicy):
pass
@pytest.fixture(
scope="session",
params=(
CustomEventLoopPolicy(),
CustomEventLoopPolicy(),
),
)
def event_loop_policy(request):
return request.param
@pytest.mark.asyncio
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
async def test_uses_custom_event_loop_policy():
assert isinstance(asyncio.get_event_loop_policy(), CustomEventLoopPolicy)
| CustomEventLoopPolicy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_materialization_health.py | {
"start": 14678,
"end": 14835
} | class ____:
num_missing_partitions: int
total_num_partitions: int
@whitelist_for_serdes
@record.record
| AssetHealthMaterializationHealthyPartitionedMeta |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 1506,
"end": 27311
} | class ____:
def test_ctor(self, xp):
# knots should be an ordered 1-D array of finite real numbers
assert_raises((TypeError, ValueError), BSpline,
**dict(t=[1, 1.j], c=[1.], k=0))
with np.errstate(invalid='ignore'):
assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
# for n+k+1 knots and degree k need at least n coefficients
assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
assert_raises(ValueError, BSpline,
**dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
# non-integer orders
assert_raises(TypeError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
assert_raises(TypeError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
# basic interval cannot have measure zero (here: [1..1])
assert_raises(ValueError, BSpline,
**dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
# tck vs self.tck
n, k = 11, 3
t = xp.arange(n+k+1, dtype=xp.float64)
c = xp.asarray(np.random.random(n))
b = BSpline(t, c, k)
xp_assert_close(t, b.t)
xp_assert_close(c, b.c)
assert k == b.k
def test_tck(self):
b = _make_random_spline()
tck = b.tck
xp_assert_close(b.t, tck[0], atol=1e-15, rtol=1e-15)
xp_assert_close(b.c, tck[1], atol=1e-15, rtol=1e-15)
assert b.k == tck[2]
# b.tck is read-only
with pytest.raises(AttributeError):
b.tck = 'foo'
def test_call_namespace(self, xp):
# similar to test_degree_0 below, only parametrized with xp
# (test_degree_0 tests array-like inputs, which resolve to numpy)
b = BSpline(t=xp.asarray([0, 1., 2]), c=xp.asarray([3., 4]), k=0)
xx = xp.linspace(0, 2, 10)
expected = xp.where(xx < 1., xp.asarray(3., dtype=xp.float64), 4.0)
xp_assert_close(b(xx), expected)
def test_degree_0(self):
xx = np.linspace(0, 1, 10)
b = BSpline(t=[0, 1], c=[3.], k=0)
xp_assert_close(b(xx), np.ones_like(xx) * 3.0)
b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
xp_assert_close(b(xx), np.where(xx < 0.35, 3.0, 4.0))
def test_degree_1(self, xp):
t = xp.asarray([0, 1, 2, 3, 4])
c = xp.asarray([1.0, 2, 3])
k = 1
b = BSpline(t, c, k)
x = xp.linspace(1.0, 3.0, 50, dtype=xp.float64)
xp_assert_close(
b(x),
c[0]*B_012(x, xp=xp) + c[1]*B_012(x-1, xp=xp) + c[2]*B_012(x-2, xp=xp),
atol=1e-14
)
x_np, t_np, c_np = map(np.asarray, (x, t, c))
splev_result = splev(x_np, (t_np, c_np, k))
xp_assert_close(b(x), xp.asarray(splev_result), atol=1e-14)
def test_bernstein(self, xp):
# a special knot vector: Bernstein polynomials
k = 3
t = xp.asarray([0]*(k+1) + [1]*(k+1))
c = xp.asarray([1., 2., 3., 4.])
bp = BPoly(xp.reshape(c, (-1, 1)), xp.asarray([0, 1]))
bspl = BSpline(t, c, k)
xx = xp.linspace(-1., 2., 10)
xp_assert_close(bp(xx, extrapolate=True),
bspl(xx, extrapolate=True), atol=1e-14)
@skip_xp_backends("dask.array", reason="_naive_eval is not dask-compatible")
@skip_xp_backends("jax.numpy", reason="too slow; XXX a slow-if marker?")
@skip_xp_backends("torch", reason="OOB on CI")
def test_rndm_naive_eval(self, xp):
# test random coefficient spline *on the base interval*,
# t[k] <= x < t[-k-1]
b = _make_random_spline(xp=xp)
t, c, k = b.tck
xx = xp.linspace(t[k], t[-k-1], 50)
y_b = b(xx)
y_n = xp.stack([_naive_eval(x, t, c, k, xp=xp) for x in xx])
xp_assert_close(y_b, y_n, atol=1e-14)
y_n2 = xp.stack([_naive_eval_2(x, t, c, k, xp=xp) for x in xx])
xp_assert_close(y_b, y_n2, atol=1e-14)
def test_rndm_splev(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
xp_assert_close(b(xx), splev(xx, (t, c, k)), atol=1e-14)
def test_rndm_splrep(self):
rng = np.random.RandomState(1234)
x = np.sort(rng.random(20))
y = rng.random(20)
tck = splrep(x, y)
b = BSpline(*tck)
t, k = b.t, b.k
xx = np.linspace(t[k], t[-k-1], 80)
xp_assert_close(b(xx), splev(xx, tck), atol=1e-14)
def test_rndm_unity(self, xp):
b = _make_random_spline(xp=xp)
b.c = xp.ones_like(b.c)
xx = xp.linspace(b.t[b.k], b.t[-b.k-1], 100, dtype=xp.float64)
xp_assert_close(b(xx), xp.ones_like(xx))
def test_vectorization(self, xp):
rng = np.random.RandomState(1234)
n, k = 22, 3
t = np.sort(rng.random(n))
c = rng.random(size=(n, 6, 7))
t, c = map(xp.asarray, (t, c))
b = BSpline(t, c, k)
tm, tp = t[k], t[-k-1]
xx = tm + (tp - tm) * xp.asarray(rng.random((3, 4, 5)))
assert b(xx).shape == (3, 4, 5, 6, 7)
def test_len_c(self):
# for n+k+1 knots, only first n coefs are used.
# and BTW this is consistent with FITPACK
rng = np.random.RandomState(1234)
n, k = 33, 3
t = np.sort(rng.random(n+k+1))
c = rng.random(n)
# pad coefficients with random garbage
c_pad = np.r_[c, rng.random(k+1)]
b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
dt = t[-1] - t[0]
xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
xp_assert_close(b(xx), b_pad(xx), atol=1e-14)
xp_assert_close(b(xx), splev(xx, (t, c, k)), atol=1e-14)
xp_assert_close(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
def test_endpoints(self, num_parallel_threads):
# base interval is closed
b = _make_random_spline()
t, _, k = b.tck
tm, tp = t[k], t[-k-1]
# atol = 1e-9 if num_parallel_threads == 1 else 1e-7
for extrap in (True, False):
xp_assert_close(b([tm, tp], extrap),
b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9, rtol=1e-7)
def test_continuity(self, num_parallel_threads):
# assert continuity at internal knots
b = _make_random_spline()
t, _, k = b.tck
xp_assert_close(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
atol=1e-9)
def test_extrap(self, xp):
b = _make_random_spline(xp=xp)
t, c, k = b.tck
dt = t[-1] - t[0]
xx = xp.linspace(t[k] - dt, t[-k-1] + dt, 50)
mask = (t[k] < xx) & (xx < t[-k-1])
# extrap has no effect within the base interval
xp_assert_close(b(xx[mask], extrapolate=True),
b(xx[mask], extrapolate=False))
# extrapolated values agree with FITPACK
xx_np, t_np, c_np = map(np.asarray, (xx, t, c))
splev_result = xp.asarray(splev(xx_np, (t_np, c_np, k), ext=0))
xp_assert_close(b(xx, extrapolate=True), splev_result)
def test_default_extrap(self):
# BSpline defaults to extrapolate=True
b = _make_random_spline()
t, _, k = b.tck
xx = [t[0] - 1, t[-1] + 1]
yy = b(xx)
assert not np.all(np.isnan(yy))
def test_periodic_extrap(self, xp):
rng = np.random.RandomState(1234)
t = np.sort(rng.random(8))
c = rng.random(4)
t, c = map(xp.asarray, (t, c))
k = 3
b = BSpline(t, c, k, extrapolate='periodic')
n = t.shape[0] - (k + 1)
dt = t[-1] - t[0]
xx = xp.linspace(t[k] - dt, t[n] + dt, 50)
xy = t[k] + (xx - t[k]) % (t[n] - t[k])
xy_np, t_np, c_np = map(np.asarray, (xy, t, c))
atol = 1e-12 if xp_default_dtype(xp) == xp.float64 else 2e-7
xp_assert_close(
b(xx), xp.asarray(splev(xy_np, (t_np, c_np, k))), atol=atol
)
# Direct check
xx = xp.asarray([-1, 0, 0.5, 1])
xy = t[k] + (xx - t[k]) % (t[n] - t[k])
xp_assert_close(
b(xx, extrapolate='periodic'),
b(xy, extrapolate=True),
atol=1e-14 if xp_default_dtype(xp) == xp.float64 else 5e-7
)
def test_ppoly(self):
b = _make_random_spline()
t, c, k = b.tck
pp = PPoly.from_spline((t, c, k))
xx = np.linspace(t[k], t[-k], 100)
xp_assert_close(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
def test_derivative_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[0], t[-1], 50)
xx = np.r_[xx, t]
for der in range(1, k+1):
yd = splev(xx, (t, c, k), der=der)
xp_assert_close(yd, b(xx, nu=der), atol=1e-14)
# higher derivatives all vanish
xp_assert_close(b(xx, nu=k+1), np.zeros_like(xx), atol=1e-14)
def test_derivative_jumps(self):
# example from de Boor, Chap IX, example (24)
# NB: knots augmented & corresp coefs are zeroed out
# in agreement with the convention (29)
k = 2
t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
rng = np.random.RandomState(1234)
c = np.r_[0, 0, rng.random(5), 0, 0]
b = BSpline(t, c, k)
# b is continuous at x != 6 (triple knot)
x = np.asarray([1, 3, 4, 6])
xp_assert_close(b(x[x != 6] - 1e-10),
b(x[x != 6] + 1e-10))
assert not np.allclose(b(6.-1e-10), b(6+1e-10))
# 1st derivative jumps at double knots, 1 & 6:
x0 = np.asarray([3, 4])
xp_assert_close(b(x0 - 1e-10, nu=1),
b(x0 + 1e-10, nu=1))
x1 = np.asarray([1, 6])
assert not np.allclose(b(x1 - 1e-10, nu=1), b(x1 + 1e-10, nu=1))
# 2nd derivative is not guaranteed to be continuous either
assert not np.allclose(b(x - 1e-10, nu=2), b(x + 1e-10, nu=2))
def test_basis_element_quadratic(self, xp):
xx = xp.linspace(-1, 4, 20)
b = BSpline.basis_element(t=xp.asarray([0, 1, 2, 3]))
xx_np, t_np, c_np = map(np.asarray, (xx, b.t, b.c))
splev_result = xp.asarray(splev(xx_np, (t_np, c_np, b.k)))
xp_assert_close(b(xx), splev_result, atol=1e-14)
atol=1e-14 if xp_default_dtype(xp) == xp.float64 else 1e-7
xp_assert_close(b(xx), xp.asarray(B_0123(xx), dtype=xp.float64), atol=atol)
b = BSpline.basis_element(t=xp.asarray([0, 1, 1, 2]))
xx = xp.linspace(0, 2, 10, dtype=xp.float64)
xp_assert_close(b(xx),
xp.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
def test_basis_element_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
xp_assert_close(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
def test_cmplx(self):
b = _make_random_spline()
t, c, k = b.tck
cc = c * (1. + 3.j)
b = BSpline(t, cc, k)
b_re = BSpline(t, b.c.real, k)
b_im = BSpline(t, b.c.imag, k)
xx = np.linspace(t[k], t[-k-1], 20)
xp_assert_close(b(xx).real, b_re(xx), atol=1e-14)
xp_assert_close(b(xx).imag, b_im(xx), atol=1e-14)
def test_nan(self, xp):
# nan in, nan out.
b = BSpline.basis_element(xp.asarray([0, 1, 1, 2]))
assert xp.isnan(b(xp.nan))
def test_derivative_method(self, xp):
b = _make_random_spline(k=5, xp=xp)
t, c, k = b.tck
b0 = BSpline(t, c, k)
xx = xp.linspace(t[k], t[-k-1], 20)
for j in range(1, k):
b = b.derivative()
xp_assert_close(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
def test_antiderivative_method(self, xp):
b = _make_random_spline(xp=xp)
t, c, k = b.tck
xx = xp.linspace(t[k], t[-k-1], 20)
xp_assert_close(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
# repeat with N-D array for c
c = xp.stack((c, c, c), axis=1)
c = xp.stack((c, c), axis=2)
b = BSpline(t, c, k)
xp_assert_close(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
def test_integral(self, xp):
b = BSpline.basis_element(xp.asarray([0, 1, 2])) # x for x < 1 else 2 - x
assert math.isclose(b.integrate(0, 1), 0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(1, 0), -1 * 0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(1, 0), -0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(0, 1), 0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(1, 0), -1 * 0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(1, 0), -0.5, abs_tol=1e-14)
# extrapolate or zeros outside of [0, 2]; default is yes
assert math.isclose(b.integrate(-1, 1), 0.0, abs_tol=1e-14)
assert math.isclose(b.integrate(-1, 1, extrapolate=True), 0.0, abs_tol=1e-14)
assert math.isclose(b.integrate(-1, 1, extrapolate=False), 0.5, abs_tol=1e-14)
assert math.isclose(b.integrate(1, -1, extrapolate=False), -0.5, abs_tol=1e-14)
# Test ``_fitpack._splint()``
assert math.isclose(b.integrate(1, -1, extrapolate=False),
_impl.splint(1, -1, b.tck), abs_tol=1e-14)
# Test ``extrapolate='periodic'``.
b.extrapolate = 'periodic'
i = b.antiderivative()
period_int = xp.asarray(i(2) - i(0), dtype=xp.float64)
assert math.isclose(b.integrate(0, 2), period_int)
assert math.isclose(b.integrate(2, 0), -1 * period_int)
assert math.isclose(b.integrate(-9, -7), period_int)
assert math.isclose(b.integrate(-8, -4), 2 * period_int)
xp_assert_close(b.integrate(0.5, 1.5),
xp.asarray(i(1.5) - i(0.5)))
xp_assert_close(b.integrate(1.5, 3),
xp.asarray(i(1) - i(0) + i(2) - i(1.5)))
xp_assert_close(b.integrate(1.5 + 12, 3 + 12),
xp.asarray(i(1) - i(0) + i(2) - i(1.5)))
xp_assert_close(b.integrate(1.5, 3 + 12),
xp.asarray(i(1) - i(0) + i(2) - i(1.5) + 6 * period_int))
xp_assert_close(b.integrate(0, -1), xp.asarray(i(0) - i(1)))
xp_assert_close(b.integrate(-9, -10), xp.asarray(i(0) - i(1)))
xp_assert_close(b.integrate(0, -9),
xp.asarray(i(1) - i(2) - 4 * period_int))
def test_integrate_ppoly(self):
# test .integrate method to be consistent with PPoly.integrate
x = [0, 1, 2, 3, 4]
b = make_interp_spline(x, x)
b.extrapolate = 'periodic'
p = PPoly.from_spline(b)
for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
xp_assert_close(b.integrate(x0, x1),
p.integrate(x0, x1))
def test_integrate_0D_always(self):
# make sure the result is always a 0D array (not a python scalar)
b = BSpline.basis_element([0, 1, 2])
for extrapolate in (True, False):
res = b.integrate(0, 1, extrapolate=extrapolate)
assert isinstance(res, np.ndarray)
assert res.ndim == 0
def test_subclassing(self):
# classmethods should not decay to the base class
class B(BSpline):
pass
b = B.basis_element([0, 1, 2, 2])
assert b.__class__ == B
assert b.derivative().__class__ == B
assert b.antiderivative().__class__ == B
@pytest.mark.parametrize('axis', range(-4, 4))
def test_axis(self, axis, xp):
n, k = 22, 3
t = xp.linspace(0, 1, n + k + 1)
sh = [6, 7, 8]
# We need the positive axis for some of the indexing and slices used
# in this test.
pos_axis = axis % 4
sh.insert(pos_axis, n) # [22, 6, 7, 8] etc
sh = tuple(sh)
rng = np.random.RandomState(1234)
c = xp.asarray(rng.random(size=sh))
b = BSpline(t, c, k, axis=axis)
assert b.c.shape == (sh[pos_axis],) + sh[:pos_axis] + sh[pos_axis+1:]
xp = rng.random((3, 4, 5))
assert b(xp).shape == sh[:pos_axis] + xp.shape + sh[pos_axis+1:]
# -c.ndim <= axis < c.ndim
for ax in [-c.ndim - 1, c.ndim]:
assert_raises(AxisError, BSpline,
**dict(t=t, c=c, k=k, axis=ax))
# derivative, antiderivative keeps the axis
for b1 in [BSpline(t, c, k, axis=axis).derivative(),
BSpline(t, c, k, axis=axis).derivative(2),
BSpline(t, c, k, axis=axis).antiderivative(),
BSpline(t, c, k, axis=axis).antiderivative(2)]:
assert b1.axis == b.axis
def test_neg_axis(self, xp):
k = 2
t = xp.asarray([0, 1, 2, 3, 4, 5, 6])
c = xp.asarray([[-1, 2, 0, -1], [2, 0, -3, 1]])
spl = BSpline(t, c, k, axis=-1)
spl0 = BSpline(t, c[0, :], k)
spl1 = BSpline(t, c[1, :], k)
xp_assert_equal(spl(2.5), xp.stack([spl0(2.5), spl1(2.5)]))
def test_design_matrix_bc_types(self):
'''
Splines with different boundary conditions are built on different
types of vectors of knots. As far as design matrix depends only on
vector of knots, `k` and `x` it is useful to make tests for different
boundary conditions (and as following different vectors of knots).
'''
def run_design_matrix_tests(n, k, bc_type):
'''
To avoid repetition of code the following function is provided.
'''
rng = np.random.RandomState(1234)
x = np.sort(rng.random_sample(n) * 40 - 20)
y = rng.random_sample(n) * 40 - 20
if bc_type == "periodic":
y[0] = y[-1]
bspl = make_interp_spline(x, y, k=k, bc_type=bc_type)
c = np.eye(len(bspl.t) - k - 1)
des_matr_def = BSpline(bspl.t, c, k)(x)
des_matr_csr = BSpline.design_matrix(x,
bspl.t,
k).toarray()
xp_assert_close(des_matr_csr @ bspl.c, y, atol=1e-14)
xp_assert_close(des_matr_def, des_matr_csr, atol=1e-14)
# "clamped" and "natural" work only with `k = 3`
n = 11
k = 3
for bc in ["clamped", "natural"]:
run_design_matrix_tests(n, k, bc)
# "not-a-knot" works with odd `k`
for k in range(3, 8, 2):
run_design_matrix_tests(n, k, "not-a-knot")
# "periodic" works with any `k` (even more than `n`)
n = 5 # smaller `n` to test `k > n` case
for k in range(2, 7):
run_design_matrix_tests(n, k, "periodic")
@pytest.mark.parametrize('extrapolate', [False, True, 'periodic'])
@pytest.mark.parametrize('degree', range(5))
def test_design_matrix_same_as_BSpline_call(self, extrapolate, degree):
"""Test that design_matrix(x) is equivalent to BSpline(..)(x)."""
rng = np.random.RandomState(1234)
x = rng.random_sample(10 * (degree + 1))
xmin, xmax = np.amin(x), np.amax(x)
k = degree
t = np.r_[np.linspace(xmin - 2, xmin - 1, degree),
np.linspace(xmin, xmax, 2 * (degree + 1)),
np.linspace(xmax + 1, xmax + 2, degree)]
c = np.eye(len(t) - k - 1)
bspline = BSpline(t, c, k, extrapolate)
xp_assert_close(
bspline(x), BSpline.design_matrix(x, t, k, extrapolate).toarray()
)
# extrapolation regime
x = np.array([xmin - 10, xmin - 1, xmax + 1.5, xmax + 10])
if not extrapolate:
with pytest.raises(ValueError):
BSpline.design_matrix(x, t, k, extrapolate)
else:
xp_assert_close(
bspline(x),
BSpline.design_matrix(x, t, k, extrapolate).toarray()
)
def test_design_matrix_x_shapes(self):
# test for different `x` shapes
rng = np.random.RandomState(1234)
n = 10
k = 3
x = np.sort(rng.random_sample(n) * 40 - 20)
y = rng.random_sample(n) * 40 - 20
bspl = make_interp_spline(x, y, k=k)
for i in range(1, 4):
xc = x[:i]
yc = y[:i]
des_matr_csr = BSpline.design_matrix(xc,
bspl.t,
k).toarray()
xp_assert_close(des_matr_csr @ bspl.c, yc, atol=1e-14)
def test_design_matrix_t_shapes(self):
# test for minimal possible `t` shape
t = [1., 1., 1., 2., 3., 4., 4., 4.]
des_matr = BSpline.design_matrix(2., t, 3).toarray()
xp_assert_close(des_matr,
[[0.25, 0.58333333, 0.16666667, 0.]],
atol=1e-14)
def test_design_matrix_asserts(self):
rng = np.random.RandomState(1234)
n = 10
k = 3
x = np.sort(rng.random_sample(n) * 40 - 20)
y = rng.random_sample(n) * 40 - 20
bspl = make_interp_spline(x, y, k=k)
# invalid vector of knots (should be a 1D non-descending array)
# here the actual vector of knots is reversed, so it is invalid
with assert_raises(ValueError):
BSpline.design_matrix(x, bspl.t[::-1], k)
k = 2
t = [0., 1., 2., 3., 4., 5.]
x = [1., 2., 3., 4.]
# out of bounds
with assert_raises(ValueError):
BSpline.design_matrix(x, t, k)
@pytest.mark.parametrize('bc_type', ['natural', 'clamped',
'periodic', 'not-a-knot'])
def test_from_power_basis(self, bc_type):
# TODO: convert CubicSpline
rng = np.random.RandomState(1234)
x = np.sort(rng.random(20))
y = rng.random(20)
if bc_type == 'periodic':
y[-1] = y[0]
cb = CubicSpline(x, y, bc_type=bc_type)
bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
xx = np.linspace(0, 1, 20)
xp_assert_close(cb(xx), bspl(xx), atol=1e-15)
bspl_new = make_interp_spline(x, y, bc_type=bc_type)
xp_assert_close(bspl.c, bspl_new.c, atol=1e-15)
@pytest.mark.parametrize('bc_type', ['natural', 'clamped',
'periodic', 'not-a-knot'])
def test_from_power_basis_complex(self, bc_type):
# TODO: convert CubicSpline
rng = np.random.RandomState(1234)
x = np.sort(rng.random(20))
y = rng.random(20) + rng.random(20) * 1j
if bc_type == 'periodic':
y[-1] = y[0]
cb = CubicSpline(x, y, bc_type=bc_type)
bspl = BSpline.from_power_basis(cb, bc_type=bc_type)
bspl_new_real = make_interp_spline(x, y.real, bc_type=bc_type)
bspl_new_imag = make_interp_spline(x, y.imag, bc_type=bc_type)
xp_assert_close(bspl.c, bspl_new_real.c + 1j * bspl_new_imag.c, atol=1e-15)
def test_from_power_basis_exmp(self):
'''
For x = [0, 1, 2, 3, 4] and y = [1, 1, 1, 1, 1]
the coefficients of Cubic Spline in the power basis:
$[[0, 0, 0, 0, 0],\\$
$[0, 0, 0, 0, 0],\\$
$[0, 0, 0, 0, 0],\\$
$[1, 1, 1, 1, 1]]$
It could be shown explicitly that coefficients of the interpolating
function in B-spline basis are c = [1, 1, 1, 1, 1, 1, 1]
'''
x = np.array([0, 1, 2, 3, 4])
y = np.array([1, 1, 1, 1, 1])
bspl = BSpline.from_power_basis(CubicSpline(x, y, bc_type='natural'),
bc_type='natural')
xp_assert_close(bspl.c, [1.0, 1, 1, 1, 1, 1, 1], atol=1e-15)
def test_read_only(self):
# BSpline must work on read-only knots and coefficients.
t = np.array([0, 1])
c = np.array([3.0])
t.setflags(write=False)
c.setflags(write=False)
xx = np.linspace(0, 1, 10)
xx.setflags(write=False)
b = BSpline(t=t, c=c, k=0)
xp_assert_close(b(xx), np.ones_like(xx) * 3.0)
def test_concurrency(self, xp):
# Check that no segfaults appear with concurrent access to BSpline
b = _make_random_spline(xp=xp)
def worker_fn(_, b):
t, _, k = b.tck
xx = xp.linspace(t[k], t[-k-1], 10000)
b(xx)
_run_concurrent_barrier(10, worker_fn, b)
@pytest.mark.xfail(
sys.platform == "cygwin",
reason="threading.get_native_id not implemented",
raises=AttributeError
)
def test_memmap(self, tmpdir):
# Make sure that memmaps can be used as t and c atrributes after the
# spline has been constructed. This is similar to what happens in a
# scikit-learn context, where joblib can create read-only memmap to
# share objects between workers. For more details, see
# https://github.com/scipy/scipy/issues/22143
b = _make_random_spline()
xx = np.linspace(0, 1, 10)
expected = b(xx)
tid = threading.get_native_id()
t_mm = np.memmap(str(tmpdir.join(f't{tid}.dat')), mode='w+',
dtype=b.t.dtype, shape=b.t.shape)
t_mm[:] = b.t
c_mm = np.memmap(str(tmpdir.join(f'c{tid}.dat')), mode='w+',
dtype=b.c.dtype, shape=b.c.shape)
c_mm[:] = b.c
b.t = t_mm
b.c = c_mm
xp_assert_close(b(xx), expected)
@make_xp_test_case(BSpline)
| TestBSpline |
python | python-poetry__poetry | src/poetry/console/commands/show.py | {
"start": 1249,
"end": 23754
} | class ____(GroupCommand, EnvCommand):
name = "show"
description = "Shows information about packages."
arguments: ClassVar[list[Argument]] = [
argument("package", "The package to inspect", optional=True)
]
options: ClassVar[list[Option]] = [
*GroupCommand._group_dependency_options(),
option("tree", "t", "List the dependencies as a tree."),
option(
"why",
None,
"When showing the full list, or a <info>--tree</info> for a single package,"
" display whether they are a direct dependency or required by other"
" packages",
),
option("latest", "l", "Show the latest version."),
option(
"outdated",
"o",
"Show the latest version but only for packages that are outdated.",
),
option(
"all",
"a",
"Show all packages (even those not compatible with current system).",
),
option("top-level", "T", "Show only top-level dependencies."),
option(
"no-truncate",
None,
"Do not truncate the output based on the terminal width.",
),
option(
"format",
"f",
"Specify the output format (`json` or `text`). Default is `text`. `json` cannot be combined with the <info>--tree</info> option.",
flag=False,
default="text",
),
]
help = """The show command displays detailed information about a package, or
lists all packages available."""
colors: ClassVar[list[str]] = ["cyan", "yellow", "green", "magenta", "blue"]
def handle(self) -> int:
package = self.argument("package")
if self.option("tree"):
self.init_styles(self.io)
if self.option("top-level"):
if self.option("tree"):
self.line_error(
"<error>Error: Cannot use --tree and --top-level at the same"
" time.</error>"
)
return 1
if package is not None:
self.line_error(
"<error>Error: Cannot use --top-level when displaying a single"
" package.</error>"
)
return 1
if self.option("why"):
if self.option("tree") and package is None:
self.line_error(
"<error>Error: --why requires a package when combined with"
" --tree.</error>"
)
return 1
if not self.option("tree") and package:
self.line_error(
"<error>Error: --why cannot be used without --tree when displaying"
" a single package.</error>"
)
return 1
if self.option("format") not in set(OutputFormats):
self.line_error(
f"<error>Error: Invalid output format. Supported formats are: {', '.join(OutputFormats)}.</error>"
)
return 1
if self.option("format") != OutputFormats.TEXT and self.option("tree"):
self.line_error(
"<error>Error: --tree option can only be used with the text output option.</error>"
)
return 1
if self.option("outdated"):
self.io.input.set_option("latest", True)
if not self.poetry.locker.is_locked():
self.line_error(
"<error>Error: poetry.lock not found. Run `poetry lock` to create"
" it.</error>"
)
return 1
locked_repo = self.poetry.locker.locked_repository()
if package:
return self._display_single_package_information(package, locked_repo)
root = self.project_with_activated_groups_only()
# Show tree view if requested
if self.option("tree"):
return self._display_packages_tree_information(locked_repo, root)
return self._display_packages_information(locked_repo, root)
def _display_single_package_information(
self, package: str, locked_repository: Repository
) -> int:
locked_packages = locked_repository.packages
canonicalized_package = canonicalize_name(package)
pkg = None
for locked in locked_packages:
if locked.name == canonicalized_package:
pkg = locked
break
if not pkg:
raise ValueError(f"Package {package} not found")
required_by = reverse_deps(pkg, locked_repository)
if self.option("tree"):
if self.option("why"):
# The default case if there's no reverse dependencies is to query
# the subtree for pkg but if any rev-deps exist we'll query for each
# of them in turn
packages = [pkg]
if required_by:
packages = [
p for p in locked_packages for r in required_by if p.name == r
]
else:
# if no rev-deps exist we'll make this clear as it can otherwise
# look very odd for packages that also have no or few direct
# dependencies
self.io.write_line(f"Package {package} is a direct dependency.")
for p in packages:
self.display_package_tree(
self.io, p, locked_packages, why_package=pkg
)
else:
self.display_package_tree(self.io, pkg, locked_packages)
return 0
if self.option("format") == OutputFormats.JSON:
package_info: dict[str, str | dict[str, str]] = {
"name": pkg.pretty_name,
"version": pkg.pretty_version,
"description": pkg.description,
}
if pkg.requires:
package_info["dependencies"] = {
dependency.pretty_name: dependency.pretty_constraint
for dependency in pkg.requires
}
if required_by:
package_info["required_by"] = required_by
self.line(json.dumps(package_info))
return 0
rows: Rows = [
["<info>name</>", f" : <c1>{pkg.pretty_name}</>"],
["<info>version</>", f" : <b>{pkg.pretty_version}</b>"],
["<info>description</>", f" : {pkg.description}"],
]
self.table(rows=rows, style="compact").render()
if pkg.requires:
self.line("")
self.line("<info>dependencies</info>")
for dependency in pkg.requires:
self.line(
f" - <c1>{dependency.pretty_name}</c1>"
f" <b>{dependency.pretty_constraint}</b>"
)
if required_by:
self.line("")
self.line("<info>required by</info>")
for parent, requires_version in required_by.items():
self.line(f" - <c1>{parent}</c1> requires <b>{requires_version}</b>")
return 0
def _display_packages_information(
self, locked_repository: Repository, root: ProjectPackage
) -> int:
import shutil
from cleo.io.null_io import NullIO
from poetry.puzzle.solver import Solver
from poetry.repositories.installed_repository import InstalledRepository
from poetry.repositories.repository_pool import RepositoryPool
from poetry.utils.helpers import get_package_version_display_string
locked_packages = locked_repository.packages
pool = RepositoryPool.from_packages(locked_packages, self.poetry.config)
solver = Solver(
root,
pool=pool,
installed=[],
locked=locked_packages,
io=NullIO(),
)
solver.provider.load_deferred(False)
with solver.use_environment(self.env):
ops = solver.solve().calculate_operations()
required_locked_packages = {op.package for op in ops if not op.skipped}
show_latest = self.option("latest")
show_all = self.option("all")
show_top_level = self.option("top-level")
show_why = self.option("why")
width = (
sys.maxsize
if self.option("no-truncate")
else shutil.get_terminal_size().columns
)
name_length = version_length = latest_length = required_by_length = 0
latest_packages = {}
latest_statuses = {}
installed_repo = InstalledRepository.load(self.env)
requires = root.all_requires
# Computing widths
for locked in locked_packages:
if locked not in required_locked_packages and not show_all:
continue
current_length = len(locked.pretty_name)
if not self.io.output.is_decorated():
installed_status = self.get_installed_status(
locked, installed_repo.packages
)
if installed_status == "not-installed":
current_length += 4
if show_latest:
latest = self.find_latest_package(locked, root)
if not latest:
latest = locked
latest_packages[locked.pretty_name] = latest
update_status = latest_statuses[locked.pretty_name] = (
self.get_update_status(latest, locked)
)
if not self.option("outdated") or update_status != "up-to-date":
name_length = max(name_length, current_length)
version_length = max(
version_length,
len(
get_package_version_display_string(
locked, root=self.poetry.file.path.parent
)
),
)
latest_length = max(
latest_length,
len(
get_package_version_display_string(
latest, root=self.poetry.file.path.parent
)
),
)
if show_why:
required_by = reverse_deps(locked, locked_repository)
required_by_length = max(
required_by_length,
len(" from " + ",".join(required_by.keys())),
)
else:
name_length = max(name_length, current_length)
version_length = max(
version_length,
len(
get_package_version_display_string(
locked, root=self.poetry.file.path.parent
)
),
)
if show_why:
required_by = reverse_deps(locked, locked_repository)
required_by_length = max(
required_by_length, len(" from " + ",".join(required_by.keys()))
)
if self.option("format") == OutputFormats.JSON:
packages = []
for locked in locked_packages:
if locked not in required_locked_packages and not show_all:
continue
if (
show_latest
and self.option("outdated")
and latest_statuses[locked.pretty_name] == "up-to-date"
):
continue
if show_top_level and not any(locked.satisfies(r) for r in requires):
continue
package: dict[str, str | list[str]] = {}
package["name"] = locked.pretty_name
package["installed_status"] = self.get_installed_status(
locked, installed_repo.packages
)
package["version"] = get_package_version_display_string(
locked, root=self.poetry.file.path.parent
)
if show_latest:
latest = latest_packages[locked.pretty_name]
package["latest_version"] = get_package_version_display_string(
latest, root=self.poetry.file.path.parent
)
if show_why:
required_by = reverse_deps(locked, locked_repository)
if required_by:
package["required_by"] = list(required_by.keys())
package["description"] = locked.description
packages.append(package)
self.line(json.dumps(packages))
return 0
write_version = name_length + version_length + 3 <= width
write_latest = name_length + version_length + latest_length + 3 <= width
why_end_column = (
name_length + version_length + latest_length + required_by_length
)
write_why = show_why and (why_end_column + 3) <= width
write_description = (why_end_column + 24) <= width
for locked in locked_packages:
color = "cyan"
name = locked.pretty_name
install_marker = ""
if show_top_level and not any(locked.satisfies(r) for r in requires):
continue
if locked not in required_locked_packages:
if not show_all:
continue
color = "black;options=bold"
else:
installed_status = self.get_installed_status(
locked, installed_repo.packages
)
if installed_status == "not-installed":
color = "red"
if not self.io.output.is_decorated():
# Non installed in non decorated mode
install_marker = " (!)"
if (
show_latest
and self.option("outdated")
and latest_statuses[locked.pretty_name] == "up-to-date"
):
continue
line = (
f"<fg={color}>"
f"{name:{name_length - len(install_marker)}}{install_marker}</>"
)
if write_version:
version = get_package_version_display_string(
locked, root=self.poetry.file.path.parent
)
line += f" <b>{version:{version_length}}</b>"
if show_latest:
latest = latest_packages[locked.pretty_name]
update_status = latest_statuses[locked.pretty_name]
if write_latest:
color = "green"
if update_status == "semver-safe-update":
color = "red"
elif update_status == "update-possible":
color = "yellow"
version = get_package_version_display_string(
latest, root=self.poetry.file.path.parent
)
line += f" <fg={color}>{version:{latest_length}}</>"
if write_why:
required_by = reverse_deps(locked, locked_repository)
if required_by:
content = ",".join(required_by.keys())
# subtract 6 for ' from '
line += f" from {content:{required_by_length - 6}}"
else:
line += " " * required_by_length
if write_description:
description = locked.description
remaining = (
width - name_length - version_length - required_by_length - 4
)
if show_latest:
remaining -= latest_length
if len(locked.description) > remaining:
description = description[: remaining - 3] + "..."
line += " " + description
self.line(line)
return 0
def _display_packages_tree_information(
self, locked_repository: Repository, root: ProjectPackage
) -> int:
packages = locked_repository.packages
for p in packages:
for require in root.all_requires:
if p.name == require.name:
self.display_package_tree(self.io, p, packages)
break
return 0
def display_package_tree(
self,
io: IO,
package: Package,
installed_packages: list[Package],
why_package: Package | None = None,
) -> None:
io.write(f"<c1>{package.pretty_name}</c1>")
description = ""
if package.description:
description = " " + package.description
io.write_line(f" <b>{package.pretty_version}</b>{description}")
if why_package is not None:
dependencies = [p for p in package.requires if p.name == why_package.name]
else:
dependencies = package.requires
dependencies = sorted(
dependencies,
key=lambda x: x.name,
)
tree_bar = "├"
total = len(dependencies)
for i, dependency in enumerate(dependencies, 1):
if i == total:
tree_bar = "└"
level = 1
color = self.colors[level]
info = (
f"{tree_bar}── <{color}>{dependency.name}</{color}>"
f" {dependency.pretty_constraint}"
)
self._write_tree_line(io, info)
tree_bar = tree_bar.replace("└", " ")
packages_in_tree = [package.name, dependency.name]
self._display_tree(
io,
dependency,
installed_packages,
packages_in_tree,
tree_bar,
level + 1,
)
def _display_tree(
self,
io: IO,
dependency: Dependency,
installed_packages: list[Package],
packages_in_tree: list[NormalizedName],
previous_tree_bar: str = "├",
level: int = 1,
) -> None:
previous_tree_bar = previous_tree_bar.replace("├", "│")
dependencies = []
for package in installed_packages:
if package.name == dependency.name:
dependencies = package.requires
break
dependencies = sorted(
dependencies,
key=lambda x: x.name,
)
tree_bar = previous_tree_bar + " ├"
total = len(dependencies)
for i, dependency in enumerate(dependencies, 1):
current_tree = packages_in_tree
if i == total:
tree_bar = previous_tree_bar + " └"
color_ident = level % len(self.colors)
color = self.colors[color_ident]
circular_warn = ""
if dependency.name in current_tree:
circular_warn = "(circular dependency aborted here)"
info = (
f"{tree_bar}── <{color}>{dependency.name}</{color}>"
f" {dependency.pretty_constraint} {circular_warn}"
)
self._write_tree_line(io, info)
tree_bar = tree_bar.replace("└", " ")
if dependency.name not in current_tree:
current_tree.append(dependency.name)
self._display_tree(
io,
dependency,
installed_packages,
current_tree,
tree_bar,
level + 1,
)
def _write_tree_line(self, io: IO, line: str) -> None:
if not io.output.supports_utf8():
line = line.replace("└", "`-")
line = line.replace("├", "|-")
line = line.replace("──", "-")
line = line.replace("│", "|")
io.write_line(line)
def init_styles(self, io: IO) -> None:
from cleo.formatters.style import Style
for color in self.colors:
style = Style(color)
io.output.formatter.set_style(color, style)
io.error_output.formatter.set_style(color, style)
def find_latest_package(
self, package: Package, root: ProjectPackage
) -> Package | None:
from cleo.io.null_io import NullIO
from poetry.puzzle.provider import Provider
from poetry.version.version_selector import VersionSelector
# find the latest version allowed in this pool
requires = root.all_requires
if package.is_direct_origin():
for dep in requires:
if dep.name == package.name and dep.source_type == package.source_type:
provider = Provider(root, self.poetry.pool, NullIO())
return provider.search_for_direct_origin_dependency(dep)
allow_prereleases: bool | None = None
for dep in requires:
if dep.name == package.name:
allow_prereleases = dep.allows_prereleases()
break
name = package.name
selector = VersionSelector(self.poetry.pool)
return selector.find_best_candidate(
name, f">={package.pretty_version}", allow_prereleases
)
def get_update_status(self, latest: Package, package: Package) -> str:
from poetry.core.constraints.version import parse_constraint
if latest.full_pretty_version == package.full_pretty_version:
return "up-to-date"
constraint = parse_constraint("^" + package.pretty_version)
if constraint.allows(latest.version):
# It needs an immediate semver-compliant upgrade
return "semver-safe-update"
# it needs an upgrade but has potential BC breaks so is not urgent
return "update-possible"
def get_installed_status(
self, locked: Package, installed_packages: list[Package]
) -> str:
for package in installed_packages:
if locked.name == package.name:
return "installed"
return "not-installed"
| ShowCommand |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/execution_result.py | {
"start": 882,
"end": 9874
} | class ____(ABC):
@property
@abstractmethod
def job_def(self) -> JobDefinition: ...
@property
@abstractmethod
def dagster_run(self) -> DagsterRun: ...
@property
@abstractmethod
def all_events(self) -> Sequence[DagsterEvent]: ...
@property
@abstractmethod
def run_id(self) -> str:
"""The unique identifier of the executed run."""
...
@property
def success(self) -> bool:
"""bool: Whether execution was successful."""
return self.dagster_run.is_success
@property
def all_node_events(self) -> Sequence[DagsterEvent]:
"""List[DagsterEvent]: All dagster events from the execution."""
step_events: list[DagsterEvent] = []
for node_name in self.job_def.graph.node_dict.keys():
handle = NodeHandle.from_string(node_name)
step_events += self._filter_events_by_handle(handle)
return step_events
@abstractmethod
def _get_output_for_handle(self, handle: NodeHandle, output_name: str) -> object:
raise NotImplementedError()
def _filter_events_by_handle(self, handle: NodeHandle) -> Sequence[DagsterEvent]:
def _is_event_from_node(event: DagsterEvent) -> bool:
if not event.is_step_event:
return False
node_handle = cast("NodeHandle", event.node_handle)
return node_handle.is_or_descends_from(handle)
return self.filter_events(_is_event_from_node)
def output_value(self, output_name: str = DEFAULT_OUTPUT) -> object:
check.str_param(output_name, "output_name")
graph_def = self.job_def.graph
if not graph_def.has_output(output_name) and len(graph_def.output_mappings) == 0:
raise DagsterInvariantViolationError(
f"Attempted to retrieve top-level outputs for '{graph_def.name}', which has no"
" outputs."
)
elif not graph_def.has_output(output_name):
raise DagsterInvariantViolationError(
f"Could not find top-level output '{output_name}' in '{graph_def.name}'."
)
# Resolve the first layer of mapping
output_mapping = graph_def.get_output_mapping(output_name)
mapped_node = graph_def.node_named(output_mapping.maps_from.node_name)
origin_output_def, origin_handle = mapped_node.definition.resolve_output_to_origin(
output_mapping.maps_from.output_name,
NodeHandle(mapped_node.name, None),
)
# Get output from origin node
return self._get_output_for_handle(check.not_none(origin_handle), origin_output_def.name)
def output_for_node(self, node_str: str, output_name: str = DEFAULT_OUTPUT) -> object:
# resolve handle of node that node_str is referring to
target_handle = NodeHandle.from_string(node_str)
target_node_def = self.job_def.graph.get_node(target_handle).definition
origin_output_def, origin_handle = target_node_def.resolve_output_to_origin(
output_name, NodeHandle.from_string(node_str)
)
# retrieve output value from resolved handle
return self._get_output_for_handle(check.not_none(origin_handle), origin_output_def.name)
def filter_events(self, event_filter: Callable[[DagsterEvent], bool]) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event_filter(event)]
def events_for_node(self, node_name: str) -> Sequence[DagsterEvent]:
"""Retrieves all dagster events for a specific node.
Args:
node_name (str): The name of the node for which outputs should be retrieved.
Returns:
List[DagsterEvent]: A list of all dagster events associated with provided node name.
"""
check.str_param(node_name, "node_name")
return self._filter_events_by_handle(NodeHandle.from_string(node_name))
def is_node_success(self, node_str: str) -> bool:
return any(evt.is_step_success for evt in self.events_for_node(node_str))
def is_node_failed(self, node_str: str) -> bool:
return any(evt.is_step_failure for evt in self.events_for_node(node_str))
def is_node_skipped(self, node_str: str) -> bool:
return any(evt.is_step_skipped for evt in self.events_for_node(node_str))
def is_node_untouched(self, node_str: str) -> bool:
return len(self.events_for_node(node_str)) == 0
def get_run_failure_event(self) -> DagsterEvent:
"""Returns a DagsterEvent with type DagsterEventType.RUN_FAILURE if it ocurred during
execution.
"""
events = self.filter_events(
lambda event: event.event_type == DagsterEventType.PIPELINE_FAILURE
)
if len(events) == 0:
raise DagsterError("No event of type DagsterEventType.PIPELINE_FAILURE found.")
return events[0]
def get_run_success_event(self) -> DagsterEvent:
"""Returns a DagsterEvent with type DagsterEventType.RUN_SUCCESS if it ocurred during
execution.
"""
events = self.filter_events(
lambda event: event.event_type == DagsterEventType.PIPELINE_SUCCESS
)
if len(events) == 0:
raise DagsterError("No event of type DagsterEventType.PIPELINE_SUCCESS found.")
return events[0]
def asset_materializations_for_node(self, node_name: str) -> Sequence[AssetMaterialization]:
return [
cast("StepMaterializationData", event.event_specific_data).materialization
for event in self.events_for_node(node_name)
if event.event_type_value == DagsterEventType.ASSET_MATERIALIZATION.value
]
def asset_observations_for_node(self, node_name: str) -> Sequence[AssetObservation]:
return [
cast("AssetObservationData", event.event_specific_data).asset_observation
for event in self.events_for_node(node_name)
if event.event_type_value == DagsterEventType.ASSET_OBSERVATION.value
]
def get_asset_materialization_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_step_materialization]
def get_asset_materialization_planned_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_asset_materialization_planned]
def get_asset_observation_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_asset_observation]
def get_asset_check_evaluations(self) -> Sequence[AssetCheckEvaluation]:
return [
cast("AssetCheckEvaluation", event.event_specific_data)
for event in self.all_events
if event.event_type_value == DagsterEventType.ASSET_CHECK_EVALUATION.value
]
def get_step_success_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_step_success]
def get_step_skipped_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_step_skipped]
def get_step_failure_events(self) -> Sequence[DagsterEvent]:
return [event for event in self.all_events if event.is_step_failure]
def get_failed_step_keys(self) -> AbstractSet[str]:
failure_events = self.filter_events(
lambda event: event.is_step_failure or event.is_resource_init_failure
)
keys: set[str] = set()
for event in failure_events:
if event.step_key:
keys.add(event.step_key)
return keys
def compute_events_for_handle(self, handle: NodeHandle) -> Sequence[DagsterEvent]:
return [
event
for event in self._filter_events_by_handle(handle)
if event.step_kind == StepKind.COMPUTE
]
def expectation_results_for_node(self, node_str: str) -> Sequence[ExpectationResult]:
handle = NodeHandle.from_string(node_str)
compute_events = self.compute_events_for_handle(handle)
expectation_result_events = list(
filter(
lambda event: event.event_type == DagsterEventType.STEP_EXPECTATION_RESULT,
compute_events,
)
)
return [
cast("StepExpectationResultData", event.event_specific_data).expectation_result
for event in expectation_result_events
]
def retry_attempts_for_node(self, node_str: str) -> int:
count = 0
for event in self.events_for_node(node_str):
if event.event_type == DagsterEventType.STEP_RESTARTED:
count += 1
return count
def failure_data_for_node(self, node_str: str) -> Optional[StepFailureData]:
for event in self.events_for_node(node_str):
if event.event_type == DagsterEventType.STEP_FAILURE:
return event.step_failure_data
return None
| ExecutionResult |
python | astral-sh__uv | scripts/scenarios/generate.py | {
"start": 1926,
"end": 11494
} | class ____(StrEnum):
install = auto()
compile = auto()
lock = auto()
def template_file(self) -> Path:
return TEMPLATES / f"{self.name}.mustache"
def test_file(self) -> Path:
match self.value:
case TemplateKind.install:
return TESTS / "pip_install_scenarios.rs"
case TemplateKind.compile:
return TESTS / "pip_compile_scenarios.rs"
case TemplateKind.lock:
return TESTS / "lock_scenarios.rs"
case _:
raise NotImplementedError()
def main(
scenarios: list[Path],
template_kinds: list[TemplateKind],
snapshot_update: bool = True,
):
# Fetch packse version
packse_version = importlib.metadata.version("packse")
debug = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
# Don't update the version to `0.0.0` to preserve the `UV_TEST_PACKSE_URL`
# in local tests.
if packse_version != "0.0.0":
update_common_mod_rs(packse_version)
if not scenarios:
if packse_version == "0.0.0":
path = packse.__development_base_path__ / "scenarios"
if path.exists():
logging.info(
"Detected development version of packse, using scenarios from %s",
path,
)
scenarios = [path]
else:
logging.error(
"No scenarios provided. Found development version of packse but is missing scenarios. Is it installed as an editable?"
)
sys.exit(1)
else:
logging.error("No scenarios provided, nothing to do.")
return
targets = []
for target in scenarios:
if target.is_dir():
targets.extend(target.glob("**/*.json"))
targets.extend(target.glob("**/*.toml"))
targets.extend(target.glob("**/*.yaml"))
else:
targets.append(target)
logging.info("Loading scenario metadata...")
data = packse.inspect.variables_for_templates(
targets=targets,
no_hash=True,
)
data["scenarios"] = [
scenario
for scenario in data["scenarios"]
# Drop example scenarios
if not scenario["name"].startswith("example")
]
# We have a mixture of long singe-line descriptions (json scenarios) we need to
# wrap and manually formatted markdown in toml and yaml scenarios we want to
# preserve.
for scenario in data["scenarios"]:
if scenario["_textwrap"]:
scenario["description"] = textwrap.wrap(scenario["description"], width=80)
else:
scenario["description"] = scenario["description"].splitlines()
# Don't drop empty lines like chevron would.
scenario["description"] = "\n/// ".join(scenario["description"])
# Apply the same wrapping to the expected explanation
for scenario in data["scenarios"]:
expected = scenario["expected"]
if explanation := expected["explanation"]:
if scenario["_textwrap"]:
expected["explanation"] = textwrap.wrap(explanation, width=80)
else:
expected["explanation"] = explanation.splitlines()
expected["explanation"] = "\n// ".join(expected["explanation"])
# Hack to track which scenarios require a specific Python patch version
for scenario in data["scenarios"]:
if "patch" in scenario["name"]:
scenario["python_patch"] = True
else:
scenario["python_patch"] = False
# Split scenarios into `install`, `compile` and `lock` cases
install_scenarios = []
compile_scenarios = []
lock_scenarios = []
for scenario in data["scenarios"]:
resolver_options = scenario["resolver_options"] or {}
# Avoid writing the empty `required-environments = []`
resolver_options["has_required_environments"] = bool(
resolver_options["required_environments"]
)
if resolver_options.get("universal"):
lock_scenarios.append(scenario)
elif resolver_options.get("python") is not None:
compile_scenarios.append(scenario)
else:
install_scenarios.append(scenario)
template_kinds_and_scenarios: list[tuple[TemplateKind, list[Any]]] = [
(TemplateKind.install, install_scenarios),
(TemplateKind.compile, compile_scenarios),
(TemplateKind.lock, lock_scenarios),
]
for template_kind, scenarios in template_kinds_and_scenarios:
if template_kind not in template_kinds:
continue
data = {"scenarios": scenarios}
ref = "HEAD" if packse_version == "0.0.0" else packse_version
# Add generated metadata
data["generated_from"] = (
f"https://github.com/astral-sh/packse/tree/{ref}/scenarios"
)
data["generated_with"] = "./scripts/sync_scenarios.sh"
data["vendor_links"] = (
f"https://raw.githubusercontent.com/astral-sh/packse/{ref}/vendor/links.html"
)
data["index_url"] = (
os.environ.get(
"UV_TEST_PACKSE_INDEX",
f"https://astral-sh.github.io/packse/{ref}",
)
+ "/simple-html"
)
# Render the template
logging.info(f"Rendering template {template_kind.name}")
output = chevron_blue.render(
template=template_kind.template_file().read_text(),
data=data,
no_escape=True,
warn=True,
)
# Update the test files
logging.info(
f"Updating test file at `{template_kind.test_file().relative_to(PROJECT_ROOT)}`...",
)
with open(template_kind.test_file(), "w") as test_file:
test_file.write(output)
# Format
logging.info(
"Formatting test file...",
)
subprocess.check_call(
["rustfmt", template_kind.test_file()],
stderr=subprocess.STDOUT,
stdout=sys.stderr if debug else subprocess.DEVNULL,
)
# Update snapshots
if snapshot_update:
logging.info("Updating snapshots...")
env = os.environ.copy()
command = [
"cargo",
"insta",
"test",
"--features",
"pypi,python,python-patch",
"--accept",
"--test-runner",
"nextest",
"--test",
"it",
"--",
template_kind.test_file().with_suffix("").name,
]
logging.debug(f"Running {' '.join(command)}")
exit_code = subprocess.call(
command,
cwd=PROJECT_ROOT,
stderr=subprocess.STDOUT,
stdout=sys.stderr if debug else subprocess.DEVNULL,
env=env,
)
if exit_code != 0:
logging.warning(
f"Snapshot update failed with exit code {exit_code} (use -v to show details)"
)
else:
logging.info("Skipping snapshot update")
logging.info("Done!")
def update_common_mod_rs(packse_version: str):
"""Update the value of `PACKSE_VERSION` used in non-scenario tests.
Example:
```rust
pub const PACKSE_VERSION: &str = "0.3.30";
```
"""
test_common = TESTS_COMMON_MOD_RS.read_text()
before_version = 'pub const PACKSE_VERSION: &str = "'
after_version = '";'
build_vendor_links_url = f"{before_version}{packse_version}{after_version}"
if build_vendor_links_url in test_common:
logging.info(f"Up-to-date: {TESTS_COMMON_MOD_RS}")
else:
logging.info(f"Updating: {TESTS_COMMON_MOD_RS}")
url_matcher = re.compile(
re.escape(before_version) + '[^"]+' + re.escape(after_version)
)
assert len(url_matcher.findall(test_common)) == 1, (
f"PACKSE_VERSION not found in {TESTS_COMMON_MOD_RS}"
)
test_common = url_matcher.sub(build_vendor_links_url, test_common)
TESTS_COMMON_MOD_RS.write_text(test_common)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generates and updates snapshot test cases from packse scenarios.",
)
parser.add_argument(
"scenarios",
type=Path,
nargs="*",
help="The scenario files to use",
)
parser.add_argument(
"--templates",
type=TemplateKind,
choices=list(TemplateKind),
default=list(TemplateKind),
nargs="*",
help="The templates to render. By default, all templates are rendered",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Enable debug logging",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Disable logging",
)
parser.add_argument(
"--no-snapshot-update",
action="store_true",
help="Disable automatic snapshot updates",
)
args = parser.parse_args()
if args.quiet:
log_level = logging.CRITICAL
elif args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level, format="%(message)s")
main(args.scenarios, args.templates, snapshot_update=not args.no_snapshot_update)
| TemplateKind |
python | tensorflow__tensorflow | tensorflow/cc/saved_model/testdata/generate_saved_models.py | {
"start": 2338,
"end": 2479
} | class ____(module.Module):
def __init__(self):
super(CyclicModule, self).__init__()
self.child = ReferencesParent(self)
| CyclicModule |
python | protocolbuffers__protobuf | python/google/protobuf/internal/testing_refleaks.py | {
"start": 681,
"end": 1186
} | class ____(unittest.TestResult):
"""A TestResult which forwards events to a parent object, except for Skips."""
def __init__(self, parent_result):
unittest.TestResult.__init__(self)
self.parent_result = parent_result
def addError(self, test, error):
self.parent_result.addError(test, error)
def addFailure(self, test, error):
self.parent_result.addFailure(test, error)
def addSkip(self, test, reason):
pass
def addDuration(self, test, duration):
pass
| LocalTestResult |
python | tensorflow__tensorflow | tensorflow/python/tools/saved_model_cli_test.py | {
"start": 2147,
"end": 47212
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super(SavedModelCLITestCase, self).setUp()
if platform.system() == 'Windows':
self.skipTest('Skipping failing tests on Windows.')
def _save_dummy_model(self, get_ops_mock):
class DummyModel(autotrackable.AutoTrackable):
"""Model with a callable concrete function."""
def __init__(self):
function = def_function.function(
self.multiply,
input_signature=[
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
])
self.pure_concrete_function = function.get_concrete_function()
def multiply(self, a, b):
return a * b
# Mocking _get_ops_in_metagraph because it returns a nondeterministically
# ordered set of ops.
get_ops_mock.return_value = {'Op1'}
saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
dummy_model = DummyModel()
with self.cached_session():
save.save(dummy_model, saved_model_dir)
return saved_model_dir
@test.mock.patch.object(saved_model_cli, '_get_ops_in_metagraph')
def testShowCommandAll(self, get_ops_mock):
# Mocking _get_ops_in_metagraph because it returns a nondeterministically
# ordered set of ops.
get_ops_mock.return_value = {'Op1', 'Op2', 'Op3'}
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
# pylint: disable=line-too-long
exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['classify_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/classify
signature_def['classify_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/classify
signature_def['regress_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y2']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y2:0
Method name is: tensorflow/serving/regress
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict
The MetaGraph with tag set ['serve'] contains the following ops:"""
# pylint: enable=line-too-long
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', base_path, '--all'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
get_ops_mock.assert_called_once()
output = out.getvalue().strip()
self.maxDiff = None # Produce useful error msg if the comparison fails
self.assertIn(exp_out, output)
self.assertIn('Op1', output)
self.assertIn('Op2', output)
self.assertIn('Op3', output)
self.assertEqual(err.getvalue().strip(), '')
@test.mock.patch.object(saved_model_cli, '_get_ops_in_metagraph')
def testShowAllWithFunctions(self, get_ops_mock):
class DummyModel(autotrackable.AutoTrackable):
"""Model with callable polymorphic functions specified."""
@def_function.function
def func1(self, a, b, c):
if c:
return a + b
else:
return a * b
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
def func2(self, x):
return x + 2
@def_function.function
def __call__(self, y, c=7):
return y + 2 * c
# Mocking _get_ops_in_metagraph because it returns a nondeterministically
# ordered set of ops.
get_ops_mock.return_value = {'Op1'}
saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
dummy_model = DummyModel()
# Call with specific values to create new polymorphic function traces.
dummy_model.func1(constant_op.constant(5), constant_op.constant(9), True)
dummy_model(constant_op.constant(5))
with self.cached_session():
save.save(dummy_model, saved_model_dir)
exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: serving_default_x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
The MetaGraph with tag set ['serve'] contains the following ops: {'Op1'}
Concrete Functions:
Function Name: '__call__'
Option #1
Callable with:
Argument #1
y: TensorSpec(shape=(), dtype=tf.int32, name='y')
Argument #2
DType: int
Value: 7
Function Name: 'func1'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.int32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.int32, name='b')
Argument #3
DType: bool
Value: True
Function Name: 'func2'
Option #1
Callable with:
Argument #1
x: TensorSpec(shape=(2, 2), dtype=tf.float32, name='x')
""".strip() # pylint: enable=line-too-long
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', saved_model_dir, '--all'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.maxDiff = None # Produce a useful error msg if the comparison fails
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
@test.mock.patch.object(saved_model_cli, '_get_ops_in_metagraph')
def testShowAllWithPureConcreteFunction(self, get_ops_mock):
saved_model_dir = self._save_dummy_model(get_ops_mock)
exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['a'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_a:0
inputs['b'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_b:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
The MetaGraph with tag set ['serve'] contains the following ops: {'Op1'}
Concrete Functions:
Function Name: 'pure_concrete_function'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.float32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.float32, name='b')
""".strip() # pylint: enable=line-too-long
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', saved_model_dir, '--all'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.maxDiff = None # Produce a useful error msg if the comparison fails
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandTags(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
exp_out = 'The given SavedModel contains the following tag-sets:\n\'serve\''
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(['saved_model_cli', 'show', '--dir', base_path])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
@test.mock.patch.object(saved_model_cli, '_get_ops_in_metagraph')
def testShowAllWithCustomOp(self, get_ops_mock):
saved_model_dir = self._save_dummy_model(get_ops_mock)
# Manually edit in a custom op into one of the functions
saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
saved_model_proto.meta_graphs[0].graph_def.library.function[0].node_def.add(
name='TestCustomOp', op='CustomOp')
file_io.atomic_write_string_to_file(
os.path.join(saved_model_dir, 'saved_model.pb'),
saved_model_proto.SerializeToString(deterministic=True))
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', saved_model_dir, '--all'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.assertIn('could not be listed due to the existence of custom ops',
output)
self.assertEqual(err.getvalue().strip(), '')
@test.mock.patch.object(saved_model_cli, '_get_ops_in_metagraph')
def testShowAllWithInvalidFuncGraph(self, get_ops_mock):
saved_model_dir = self._save_dummy_model(get_ops_mock)
# Manually edit one of the functions to make `tf.saved_model.load` fail.
saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
saved_model_proto.meta_graphs[0].graph_def.library.function[0].node_def.add(
name='TestInvalidOp', op='VarHandleOp')
file_io.atomic_write_string_to_file(
os.path.join(saved_model_dir, 'saved_model.pb'),
saved_model_proto.SerializeToString(deterministic=True))
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', saved_model_dir, '--all'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.assertIn('could not be listed due to unknown reasons', output)
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandSignature(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
'with the following keys:')
exp_start = 'SignatureDef key: '
exp_keys = [
'"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
'"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
]
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS(
['saved_model_cli', 'show', '--dir', base_path, '--tag_set', 'serve'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
# Order of signatures does not matter
self.assertMultiLineEqual(
output,
'\n'.join([exp_header] +
[exp_start + exp_key for exp_key in exp_keys]))
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandErrorNoTagSet(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli', 'show', '--dir', base_path,
'--tag_set', 'badtagset'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaises(RuntimeError):
saved_model_cli.show()
def testShowCommandInputsOutputs(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
expected_output = (
'The given SavedModel SignatureDef contains the following input(s):\n'
' inputs[\'x\'] tensor_info:\n'
' dtype: DT_FLOAT\n shape: (-1, 1)\n name: x:0\n'
'The given SavedModel SignatureDef contains the following output(s):\n'
' outputs[\'y\'] tensor_info:\n'
' dtype: DT_FLOAT\n shape: (-1, 1)\n name: y:0\n'
'Method name is: tensorflow/serving/predict')
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli', 'show', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'serving_default'
])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.assertEqual(output, expected_output)
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandListOps(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli', 'show', '--dir', base_path, '--tag_set', 'serve',
'--list_ops'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.assertIn(
'The MetaGraph with tag set [\'serve\'] contains the following ops:',
output)
self.assertIn('\'VariableV2\'', output)
self.assertIn('\'Add\'', output)
self.assertIn('\'RestoreV2\'', output)
self.assertIn('\'ShardedFilename\'', output)
self.assertIn('\'Placeholder\'', output)
self.assertIn('\'Mul\'', output)
self.assertIn('\'Pack\'', output)
self.assertIn('\'Reshape\'', output)
self.assertIn('\'SaveV2\'', output)
self.assertIn('\'Const\'', output)
self.assertIn('\'Identity\'', output)
self.assertIn('\'Assign\'', output)
self.assertIn('\'ParseExample\'', output)
self.assertIn('\'StringJoin\'', output)
self.assertIn('\'MergeV2Checkpoints\'', output)
self.assertIn('\'NoOp\'', output)
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandListOpsNoTags(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
exp_out = ('--list_ops must be paired with a tag-set or with --all.\n'
'The given SavedModel contains the following tag-sets:\n'
'\'serve\'').strip()
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli', 'show', '--dir', base_path, '--list_ops'])
parser = saved_model_cli.create_parser()
parser.parse_args()
with captured_output() as (out, err):
saved_model_cli.show()
output = out.getvalue().strip()
self.maxDiff = None # Produce a useful error msg if the comparison fails
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
def testPrintREFTypeTensor(self):
ref_tensor_info = meta_graph_pb2.TensorInfo(
dtype=types_pb2.DT_FLOAT_REF)
with captured_output() as (out, err):
saved_model_cli._print_tensor_info(ref_tensor_info)
self.assertIn('DT_FLOAT_REF', out.getvalue().strip())
self.assertEqual(err.getvalue().strip(), '')
def testInputPreProcessFormats(self):
input_str = 'input1=/path/file.txt[ab3];input2=file2'
input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]'
input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string(
input_expr_str, safe=False)
self.assertEqual(input_dict['input1'], ('/path/file.txt', 'ab3'))
self.assertEqual(input_dict['input2'], ('file2', None))
print(input_expr_dict['input3'])
self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2]))
self.assertAllClose(input_expr_dict['input4'], [4, 5])
self.assertLen(input_dict, 2)
self.assertLen(input_expr_dict, 2)
def testInputPreProcessExamplesWithStrAndBytes(self):
input_examples_str = 'inputs=[{"text":["foo"], "bytes":[b"bar"]}]'
input_dict = saved_model_cli.preprocess_input_examples_arg_string(
input_examples_str)
feature = example_pb2.Example.FromString(input_dict['inputs'][0])
self.assertProtoEquals(
"""
features {
feature {
key: "bytes"
value {
bytes_list {
value: "bar"
}
}
}
feature {
key: "text"
value {
bytes_list {
value: "foo"
}
}
}
}
""", feature)
def testInputPreprocessExampleWithCodeInjection(self):
input_examples_str = 'inputs=os.system("echo hacked")'
with self.assertRaisesRegex(RuntimeError, 'not a valid python literal.'):
saved_model_cli.preprocess_input_examples_arg_string(input_examples_str)
def testInputPreProcessFileNames(self):
input_str = (r'inputx=C:\Program Files\data.npz[v:0];'
r'input:0=c:\PROGRA~1\data.npy')
input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
self.assertEqual(input_dict['inputx'], (r'C:\Program Files\data.npz',
'v:0'))
self.assertEqual(input_dict['input:0'], (r'c:\PROGRA~1\data.npy', None))
def testInputPreProcessErrorBadFormat(self):
input_str = 'inputx=file[[v1]v2'
with self.assertRaises(RuntimeError):
saved_model_cli.preprocess_inputs_arg_string(input_str)
input_str = 'inputx:file'
with self.assertRaises(RuntimeError):
saved_model_cli.preprocess_inputs_arg_string(input_str)
input_str = 'inputx:np.zeros((5))'
with self.assertRaisesRegex(RuntimeError, 'format is incorrect'):
saved_model_cli.preprocess_input_exprs_arg_string(input_str, safe=False)
def testInputParserNPY(self):
x0 = np.array([[1], [2]])
x1 = np.array(range(6)).reshape(2, 3)
input0_path = os.path.join(test.get_temp_dir(), 'input0.npy')
input1_path = os.path.join(test.get_temp_dir(), 'input1.npy')
np.save(input0_path, x0)
np.save(input1_path, x1)
input_str = 'x0=' + input0_path + '[x0];x1=' + input1_path
feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
input_str, '', '')
self.assertTrue(np.all(feed_dict['x0'] == x0))
self.assertTrue(np.all(feed_dict['x1'] == x1))
def testInputParserNPZ(self):
x0 = np.array([[1], [2]])
input_path = os.path.join(test.get_temp_dir(), 'input.npz')
np.savez(input_path, a=x0)
input_str = 'x=' + input_path + '[a];y=' + input_path
feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
input_str, '', '')
self.assertTrue(np.all(feed_dict['x'] == x0))
self.assertTrue(np.all(feed_dict['y'] == x0))
def testInputParserPickle(self):
pkl0 = {'a': 5, 'b': np.array(range(4))}
pkl1 = np.array([1])
pkl2 = np.array([[1], [3]])
input_path0 = os.path.join(test.get_temp_dir(), 'pickle0.pkl')
input_path1 = os.path.join(test.get_temp_dir(), 'pickle1.pkl')
input_path2 = os.path.join(test.get_temp_dir(), 'pickle2.pkl')
with open(input_path0, 'wb') as f:
pickle.dump(pkl0, f)
with open(input_path1, 'wb') as f:
pickle.dump(pkl1, f)
with open(input_path2, 'wb') as f:
pickle.dump(pkl2, f)
input_str = 'x=' + input_path0 + '[b];y=' + input_path1 + '[c];'
input_str += 'z=' + input_path2
feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
input_str, '', '')
self.assertTrue(np.all(feed_dict['x'] == pkl0['b']))
self.assertTrue(np.all(feed_dict['y'] == pkl1))
self.assertTrue(np.all(feed_dict['z'] == pkl2))
def testInputParserErrorNoName(self):
x0 = np.array([[1], [2]])
x1 = np.array(range(5))
input_path = os.path.join(test.get_temp_dir(), 'input.npz')
np.savez(input_path, a=x0, b=x1)
input_str = 'x=' + input_path
with self.assertRaises(RuntimeError):
saved_model_cli.load_inputs_from_input_arg_string(input_str, '', '')
def testInputParserErrorWrongName(self):
x0 = np.array([[1], [2]])
x1 = np.array(range(5))
input_path = os.path.join(test.get_temp_dir(), 'input.npz')
np.savez(input_path, a=x0, b=x1)
input_str = 'x=' + input_path + '[c]'
with self.assertRaises(RuntimeError):
saved_model_cli.load_inputs_from_input_arg_string(input_str, '', '')
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInputExamples(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
output_dir = os.path.join(test.get_temp_dir(),
'input_examples' + ('tfrt' if use_tfrt else ''))
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x_to_y', '--input_examples',
'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]',
'--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
saved_model_cli.run()
y_actual = np.load(os.path.join(output_dir, 'outputs.npy'))
y_expected = np.array([[6.0], [4.0]])
self.assertAllEqual(y_expected, y_actual)
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandLongInputExamples(self, use_tfrt):
class DummyModel(autotrackable.AutoTrackable):
"""Model with callable polymorphic functions specified."""
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.string),
])
def func(self, inputs):
ex = parsing_ops.parse_example(serialized=inputs, features={
'variable0': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable1': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable2': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable3': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable4': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable5': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable6': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable7': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable8': parsing_config.FixedLenFeature(
(), dtypes.float32),
'variable9': parsing_config.FixedLenFeature(
(), dtypes.float32),
})
return {'outputs': sum(ex.values())}
saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
dummy_model = DummyModel()
func = getattr(dummy_model, 'func')
with self.cached_session():
save.save(dummy_model, saved_model_dir, signatures={'func': func})
output_dir = os.path.join(
test.get_temp_dir(),
'long_input_examples' + ('tfrt' if use_tfrt else ''))
saved_model_cli.flags.FLAGS.unparse_flags()
input_examples = (
'inputs=[{"variable0":[0.0],"variable1":[1.0],"variable2":[2.0],'
'"variable3":[3.0],"variable4":[4.0],"variable5":[5.0],'
'"variable6":[6.0],"variable7":[7.0],"variable8":[8.0],'
'"variable9":[9.0]}, {"variable0":[10.0],"variable1":[1.0],'
'"variable2":[2.0],"variable3":[3.0],"variable4":[4.0],'
'"variable5":[5.0],"variable6":[6.0],"variable7":[7.0],'
'"variable8":[8.0],"variable9":[9.0]}]')
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', saved_model_dir, '--tag_set', 'serve',
'--signature_def', 'func', '--input_examples', input_examples,
'--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
saved_model_cli.run()
y_actual = np.load(os.path.join(output_dir, 'outputs.npy'))
y_expected = np.array([45.0, 55.0])
self.assertAllEqual(y_expected, y_actual)
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandExistingOutdir(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
input_path = os.path.join(test.get_temp_dir(), 'testRunCommand_inputs.npz')
x = np.array([[1], [2]])
x_notused = np.zeros((6, 3))
np.savez(input_path, x0=x, x1=x_notused)
output_file = os.path.join(test.get_temp_dir(), 'outputs.npy')
if os.path.exists(output_file):
os.remove(output_file)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x2_to_y3', '--inputs',
'inputs=' + input_path + '[x0]', '--outdir', test.get_temp_dir()
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
saved_model_cli.run()
y_actual = np.load(output_file)
y_expected = np.array([[3.5], [4.0]])
self.assertAllClose(y_expected, y_actual)
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandNewOutdir(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
input_path = os.path.join(test.get_temp_dir(),
'testRunCommandNewOutdir_inputs.npz')
x = np.array([[1], [2]])
x_notused = np.zeros((6, 3))
np.savez(input_path, x0=x, x1=x_notused)
output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'serving_default', '--inputs', 'x=' +
input_path + '[x0]', '--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
saved_model_cli.run()
y_actual = np.load(os.path.join(output_dir, 'y.npy'))
y_expected = np.array([[2.5], [3.0]])
self.assertAllClose(y_expected, y_actual)
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandOutOverwrite(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
input_path = os.path.join(test.get_temp_dir(),
'testRunCommandOutOverwrite_inputs.npz')
x = np.array([[1], [2]])
x_notused = np.zeros((6, 3))
np.savez(input_path, x0=x, x1=x_notused)
output_file = os.path.join(test.get_temp_dir(), 'y.npy')
open(output_file, 'a').close()
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'serving_default', '--inputs', 'x=' +
input_path + '[x0]', '--outdir', test.get_temp_dir(),
'--overwrite'
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
saved_model_cli.run()
y_actual = np.load(output_file)
y_expected = np.array([[2.5], [3.0]])
self.assertAllClose(y_expected, y_actual)
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInvalidInputKeyError(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x2_to_y3',
'--input_exprs', 'x2=[1,2,3]'
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaises(ValueError):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInvalidSignature(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'INVALID_SIGNATURE',
'--input_exprs', 'x2=[1,2,3]'
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaisesRegex(ValueError,
'Could not find signature '
'"INVALID_SIGNATURE"'):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInputExamplesNotListError(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x_to_y',
'--input_examples', 'inputs={"x":8.0,"x2":5.0}',
'--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaisesRegex(ValueError, 'must be a list'):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInputExamplesFeatureValueNotListError(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x_to_y',
'--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
'--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaisesRegex(ValueError, 'feature value must be a list'):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInputExamplesFeatureBadType(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'regress_x_to_y',
'--input_examples', 'inputs=[{"x":[[1],[2]]}]',
'--outdir', output_dir
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaisesRegex(ValueError, 'is not supported'):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandOutputFileExistError(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
input_path = os.path.join(test.get_temp_dir(),
'testRunCommandOutOverwrite_inputs.npz')
x = np.array([[1], [2]])
x_notused = np.zeros((6, 3))
np.savez(input_path, x0=x, x1=x_notused)
output_file = os.path.join(test.get_temp_dir(), 'y.npy')
open(output_file, 'a').close()
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'serving_default', '--inputs', 'x=' +
input_path + '[x0]', '--outdir', test.get_temp_dir()
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaises(RuntimeError):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandInputNotGivenError(self, use_tfrt):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
saved_model_cli.flags.FLAGS.unparse_flags()
saved_model_cli.flags.FLAGS([
'saved_model_cli',
'run', '--dir', base_path, '--tag_set', 'serve',
'--signature_def', 'serving_default'
] + (['--use_tfrt'] if use_tfrt else []))
parser = saved_model_cli.create_parser()
parser.parse_args()
with self.assertRaises(AttributeError):
saved_model_cli.run()
@parameterized.named_parameters(('non_tfrt', False))
def testRunCommandWithDebuggerEnabled(self, use_tfrt):
  """Runs the CLI `run` command with --tf_debug and a stubbed debug wrapper."""
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  input_path = os.path.join(test.get_temp_dir(),
                            'testRunCommandNewOutdir_inputs.npz')
  x = np.array([[1], [2]])
  x_notused = np.zeros((6, 3))  # extra array in the .npz that is never fed
  np.savez(input_path, x0=x, x1=x_notused)
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)  # start from a clean output directory
  saved_model_cli.flags.FLAGS.unparse_flags()
  saved_model_cli.flags.FLAGS([
      'saved_model_cli',
      'run', '--dir', base_path, '--tag_set', 'serve',
      '--signature_def', 'serving_default', '--inputs', 'x=' +
      input_path + '[x0]', '--outdir', output_dir, '--tf_debug'
  ] + (['--use_tfrt'] if use_tfrt else []))
  parser = saved_model_cli.create_parser()
  parser.parse_args()

  def fake_wrapper_session(sess):
    # Identity stand-in for the debugger wrapper: return the session as-is
    # so the command runs normally while we record that it was wrapped.
    return sess

  with test.mock.patch.object(
      local_cli_wrapper,
      'LocalCLIDebugWrapperSession',
      side_effect=fake_wrapper_session,
      autospec=True) as fake:
    saved_model_cli.run()
    fake.assert_called_with(test.mock.ANY)
  # The run must still have produced the model output despite the debug wrap.
  y_actual = np.load(os.path.join(output_dir, 'y.npy'))
  y_expected = np.array([[2.5], [3.0]])
  self.assertAllClose(y_expected, y_actual)
def testScanCommand(self):
    """`scan` with the default denylist reports no hits but names the ops."""
    model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
    saved_model_cli.flags.FLAGS.unparse_flags()
    saved_model_cli.flags.FLAGS(
        ['saved_model_cli', 'scan', '--dir', model_dir])
    saved_model_cli.create_parser().parse_args()
    with captured_output() as (out, _):
        saved_model_cli.scan()
    report = out.getvalue().strip()
    self.assertIn(('MetaGraph with tag set [\'serve\'] does not contain the '
                   'default denylisted ops: {\''), report)
    # Each default-denylisted op must be enumerated in the report.
    for op_token in ('\'ReadFile\'', '\'WriteFile\'', '\'PrintV2\''):
        self.assertIn(op_token, report)
def testScanCommandFoundCustomDenylistedOp(self):
    """`scan` with a custom denylist reports the denylisted ops it finds."""
    model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
    saved_model_cli.flags.FLAGS.unparse_flags()
    saved_model_cli.flags.FLAGS([
        'saved_model_cli',
        'scan', '--dir', model_dir, '--tag_set', 'serve', '--op_denylist',
        'VariableV2,Assign,Relu6'])
    saved_model_cli.create_parser().parse_args()
    with captured_output() as (out, _):
        saved_model_cli.scan()
    report = out.getvalue().strip()
    self.assertIn(('MetaGraph with tag set [\'serve\'] contains the following'
                   ' denylisted ops:'), report)
    # Set iteration order is unspecified, so accept either rendering.
    self.assertTrue(('{\'VariableV2\', \'Assign\'}' in report) or
                    ('{\'Assign\', \'VariableV2\'}' in report))
def testAOTCompileCPUWrongSignatureDefKey(self):
    """aot_compile_cpu with an unknown --signature_def_key raises ValueError."""
    if not test.is_built_with_xla():
        self.skipTest('Skipping test because XLA is not compiled in.')
    model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
    out_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir')
    saved_model_cli.flags.FLAGS.unparse_flags()
    saved_model_cli.flags.FLAGS([
        'saved_model_cli',
        'aot_compile_cpu', '--dir', model_dir, '--tag_set', 'serve',
        '--output_prefix', out_prefix, '--cpp_class', 'Compiled',
        '--signature_def_key', 'MISSING'])
    saved_model_cli.create_parser().parse_args()
    # 'MISSING' is not a signature in the test model, so compilation must bail.
    with self.assertRaisesRegex(ValueError, 'Unable to find signature_def'):
        saved_model_cli.aot_compile_cpu()
class AOTCompileDummyModel(autotrackable.AutoTrackable):
  """Model compatible with XLA compilation.

  Exposes three tf.function signatures exercised by the AOT-compile tests:
  `func2` (small input plus an unused one), `func3` (large input), and
  `func_write` (writes to a variable).
  """

  def __init__(self):
    # Read-only variable added to the input in every signature.
    self.var = variables.Variable(1.0, name='my_var')
    # Variable assigned by func_write; exercises writeable-variable codegen.
    self.write_var = variables.Variable(1.0, name='write_var')

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
      # Test unused inputs.
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func2(self, x, y):
    # `y` is deliberately ignored so freezing can prune it from the graph.
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      # Test large inputs.
      tensor_spec.TensorSpec(shape=(2048, 16), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func3(self, x, y):
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func_write(self, x, y):
    del y
    # Side effect: persists x + var into write_var before returning it.
    self.write_var.assign(x + self.var)
    return {'res': self.write_var}
@parameterized.named_parameters(
    ('VariablesToFeedNone', '', 'func2', 'x86_64-pc-linux'),
    (
        'VariablesToFeedNoneTargetAarch64Linux',
        '',
        'func2',
        'aarch64-none-linux-gnu',
    ),
    (
        'VariablesToFeedNoneTargetAarch64Android',
        '',
        'func2',
        'aarch64-none-android',
    ),
    ('VariablesToFeedAll', 'all', 'func2', 'x86_64-pc-linux'),
    ('VariablesToFeedMyVar', 'my_var', 'func2', 'x86_64-pc-linux'),
    ('VariablesToFeedNoneLargeConstant', '', 'func3', 'x86_64-pc-linux'),
    ('WriteToWriteVar', 'all', 'func_write', 'x86_64-pc-linux'),
)
def testAOTCompileCPUFreezesAndCompiles(self, variables_to_feed, func,
                                        target_triple):
  """Saves a dummy model, AOT-compiles it, and checks the emitted artifacts."""
  if not test.is_built_with_xla():
    self.skipTest('Skipping test because XLA is not compiled in.')
  if not test.is_cpu_target_available(target_triple.partition('-')[0]):
    self.skipTest('Skipping test because target support is not compiled in.')
  if platform.machine() == 's390x' and 'aarch64' in str(target_triple):
    self.skipTest('Skipping arm tests on s390x.')

  saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
  dummy_model = self.AOTCompileDummyModel()
  func = getattr(dummy_model, func)  # resolve the signature method by name
  with self.cached_session():
    self.evaluate(dummy_model.var.initializer)
    self.evaluate(dummy_model.write_var.initializer)
    save.save(dummy_model, saved_model_dir, signatures={'func': func})
  output_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir/out')
  saved_model_cli.flags.FLAGS.unparse_flags()
  saved_model_cli.flags.FLAGS([
      'saved_model_cli',  # Use the default serving signature_key.
      'aot_compile_cpu', '--dir', saved_model_dir, '--tag_set', 'serve',
      '--signature_def_key', 'func', '--output_prefix', output_prefix,
      '--variables_to_feed', variables_to_feed,
      '--cpp_class', 'Generated'
  ] + (['--target_triple', target_triple] if target_triple else []))
  parser = saved_model_cli.create_parser()
  parser.parse_args()
  with test.mock.patch.object(logging, 'warn') as captured_warn:
    saved_model_cli.aot_compile_cpu()
  # The unused input `y` should have been pruned, with a warning logged.
  self.assertRegex(
      str(captured_warn.call_args),
      'Signature input key \'y\'.*has been pruned while freezing the '
      'graph.')
  # Compilation must emit object file, header, metadata object and makefile.
  self.assertTrue(file_io.file_exists('{}.o'.format(output_prefix)))
  self.assertTrue(file_io.file_exists('{}.h'.format(output_prefix)))
  self.assertTrue(file_io.file_exists(
      '{}_metadata.o'.format(output_prefix)))
  self.assertTrue(
      file_io.file_exists('{}_makefile.inc'.format(output_prefix)))
  header_contents = file_io.read_file_to_string(
      '{}.h'.format(output_prefix))
  self.assertIn('class Generated', header_contents)
  self.assertIn('arg_feed_x_data', header_contents)
  self.assertIn('result_fetch_res_data', header_contents)
  # arg_y got filtered out as it's not used by the output.
  self.assertNotIn('arg_feed_y_data', header_contents)
  if variables_to_feed:
    # Read-only-variables' setters preserve constness.
    self.assertIn('set_var_param_my_var_data(const float', header_contents)
    self.assertNotIn('set_var_param_my_var_data(float', header_contents)
    if func == dummy_model.func_write:  # pylint: disable=comparison-with-callable
      # Writeable variables setters do not preserve constness.
      self.assertIn('set_var_param_write_var_data(float', header_contents)
      self.assertNotIn('set_var_param_write_var_data(const float',
                       header_contents)
  makefile_contents = file_io.read_file_to_string(
      '{}_makefile.inc'.format(output_prefix))
  self.assertIn('-D_GLIBCXX_USE_CXX11_ABI=', makefile_contents)
def testFreezeModel(self):
    """freeze_model writes a frozen GraphDef plus its config proto."""
    if not test.is_built_with_xla():
        self.skipTest('Skipping test because XLA is not compiled in.')
    model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
    dummy_model = self.AOTCompileDummyModel()
    signature_fn = getattr(dummy_model, 'func2')
    with self.cached_session():
        self.evaluate(dummy_model.var.initializer)
        self.evaluate(dummy_model.write_var.initializer)
        save.save(dummy_model, model_dir, signatures={'func': signature_fn})
    out_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir/out')
    saved_model_cli.flags.FLAGS.unparse_flags()
    saved_model_cli.flags.FLAGS([
        'saved_model_cli',  # Use the default serving signature_key.
        'freeze_model', '--dir', model_dir, '--tag_set', 'serve',
        '--signature_def_key', 'func', '--output_prefix', out_prefix,
        '--variables_to_feed', 'all'])
    saved_model_cli.create_parser().parse_args()
    # Freezing logs warnings about pruned inputs; silence them here.
    with test.mock.patch.object(logging, 'warn'):
        saved_model_cli.freeze_model()
    for artifact in ('frozen_graph.pb', 'config.pbtxt'):
        self.assertTrue(
            file_io.file_exists(os.path.join(out_prefix, artifact)))
# Standard TensorFlow test entry point: runs all test cases in this module.
if __name__ == '__main__':
  test.main()
| SavedModelCLITestCase |
python | astropy__astropy | astropy/modeling/tests/test_parameters.py | {
"start": 4561,
"end": 4680
} | class ____(Model):
m1a = Parameter(default=1.0)
m1b = Parameter(default=5.0)
def evaluate():
pass
| M1 |
python | modin-project__modin | modin/config/envvars.py | {
"start": 28646,
"end": 28881
} | class ____(EnvironmentVariable, type=ExactStr):
"""Allows to select a library that we will use for testing performance."""
varname = "MODIN_ASV_USE_IMPL"
choices = ("modin", "pandas")
default = "modin"
| AsvImplementation |
python | ansible__ansible | test/integration/targets/templating/filter_plugins/broken_filter.py | {
"start": 37,
"end": 134
} | class ____:
@property
def accept_args_markers(self):
raise Exception('boom')
| Broken |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.