language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | realpython__materials | flask-connexion-rest-part-4/models.py | {
"start": 571,
"end": 904
} | class ____(db.Model):
__tablename__ = "note"
note_id = db.Column(db.Integer, primary_key=True)
person_id = db.Column(db.Integer, db.ForeignKey("person.person_id"))
content = db.Column(db.String, nullable=False)
timestamp = db.Column(
db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
)
| Note |
python | pytorch__pytorch | torch/_dynamo/variables/nn_module.py | {
"start": 54739,
"end": 55075
} | class ____(UnspecializedNNModuleVariable):
"""
Differentiates between builtin nn modules (e.g. torch.nn.Linear) and user defined nn modules.
"""
def _wrap_source(self, attr_source):
# vt is already wrapped with the UnspecializedBuiltinNNModuleSource
return attr_source
| UnspecializedBuiltinNNModuleVariable |
python | pytorch__pytorch | torch/backends/__init__.py | {
"start": 1319,
"end": 1519
} | class ____(types.ModuleType):
def __init__(self, m, name):
super().__init__(name)
self.m = m
def __getattr__(self, attr):
return self.m.__getattribute__(attr)
| PropModule |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 15587,
"end": 18228
} | class ____(HandlerNpointsYoffsets):
r"""Handler for `.RegularPolyCollection`\s."""
def __init__(self, yoffsets=None, sizes=None, **kwargs):
super().__init__(yoffsets=yoffsets, **kwargs)
self._sizes = sizes
def get_numpoints(self, legend):
if self._numpoints is None:
return legend.scatterpoints
else:
return self._numpoints
def get_sizes(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize):
if self._sizes is None:
handle_sizes = orig_handle.get_sizes()
if not len(handle_sizes):
handle_sizes = [1]
size_max = max(handle_sizes) * legend.markerscale ** 2
size_min = min(handle_sizes) * legend.markerscale ** 2
numpoints = self.get_numpoints(legend)
if numpoints < 4:
sizes = [.5 * (size_max + size_min), size_max,
size_min][:numpoints]
else:
rng = (size_max - size_min)
sizes = rng * np.linspace(0, 1, numpoints) + size_min
else:
sizes = self._sizes
return sizes
def update_prop(self, legend_handle, orig_handle, legend):
self._update_prop(legend_handle, orig_handle)
legend_handle.set_figure(legend.get_figure(root=False))
# legend._set_artist_props(legend_handle)
legend_handle.set_clip_box(None)
legend_handle.set_clip_path(None)
def create_collection(self, orig_handle, sizes, offsets, offset_transform):
return type(orig_handle)(
orig_handle.get_numsides(),
rotation=orig_handle.get_rotation(), sizes=sizes,
offsets=offsets, offset_transform=offset_transform,
)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
# docstring inherited
xdata, xdata_marker = self.get_xdata(legend, xdescent, ydescent,
width, height, fontsize)
ydata = self.get_ydata(legend, xdescent, ydescent,
width, height, fontsize)
sizes = self.get_sizes(legend, orig_handle, xdescent, ydescent,
width, height, fontsize)
p = self.create_collection(
orig_handle, sizes,
offsets=list(zip(xdata_marker, ydata)), offset_transform=trans)
self.update_prop(p, orig_handle, legend)
p.set_offset_transform(trans)
return [p]
| HandlerRegularPolyCollection |
python | kamyu104__LeetCode-Solutions | Python/climbing-stairs-ii.py | {
"start": 34,
"end": 384
} | class ____(object):
def climbStairs(self, n, costs):
"""
:type n: int
:type costs: List[int]
:rtype: int
"""
a, b, c = float("inf"), float("inf"), 0
for i in xrange(n):
a, b, c = b, c, costs[i]+min(a+3**2, b+2**2, c+1**2)
return c
# Time: O(n)
# Space: O(n)
# dp
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/query.py | {
"start": 118118,
"end": 119567
} | class ____:
"""State used for the orm.Query version of update() / delete().
This object is now specific to Query only.
"""
def __init__(self, query: Query[Any]):
self.query = query.enable_eagerloads(False)
self._validate_query_state()
self.mapper = self.query._entity_from_pre_ent_zero()
def _validate_query_state(self) -> None:
for attr, methname, notset, op in (
("_limit_clause", "limit()", None, operator.is_),
("_offset_clause", "offset()", None, operator.is_),
("_order_by_clauses", "order_by()", (), operator.eq),
("_group_by_clauses", "group_by()", (), operator.eq),
("_distinct", "distinct()", False, operator.is_),
(
"_from_obj",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
(
"_setup_joins",
"join(), outerjoin(), select_from(), or from_self()",
(),
operator.eq,
),
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" % (methname,)
)
@property
def session(self) -> Session:
return self.query.session
| BulkUD |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 26459,
"end": 34354
} | class ____(SplinterPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.splinter = SplinterModel(config)
self.splinter_qass = QuestionAwareSpanSelectionHead(config)
self.question_token_id = config.question_token_id
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
question_positions: Optional[torch.LongTensor] = None,
) -> Union[tuple, SplinterForPreTrainingOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_questions, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `batch_size, num_questions, sequence_length`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `batch_size, num_questions, sequence_length`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*):
The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size,
num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be
the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size,
sequence_length)`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if question_positions is None and start_positions is not None and end_positions is not None:
raise TypeError("question_positions must be specified in order to calculate the loss")
elif question_positions is None and input_ids is None:
raise TypeError("question_positions must be specified when input_embeds is used")
elif question_positions is None:
question_positions = self._prepare_question_positions(input_ids)
outputs = self.splinter(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
batch_size, sequence_length, dim = sequence_output.size()
# [batch_size, num_questions, sequence_length]
start_logits, end_logits = self.splinter_qass(sequence_output, question_positions)
num_questions = question_positions.size(1)
if attention_mask is not None:
attention_mask_for_each_question = attention_mask.unsqueeze(1).expand(
batch_size, num_questions, sequence_length
)
start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min
end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min
total_loss = None
# [batch_size, num_questions, sequence_length]
if start_positions is not None and end_positions is not None:
# sometimes the start/end positions are outside our model inputs, we ignore these terms
start_positions.clamp_(0, max(0, sequence_length - 1))
end_positions.clamp_(0, max(0, sequence_length - 1))
# Ignore zero positions in the loss. Splinter never predicts zero
# during pretraining and zero is used for padding question
# tokens as well as for start and end positions of padded
# question tokens.
loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id)
start_loss = loss_fct(
start_logits.view(batch_size * num_questions, sequence_length),
start_positions.view(batch_size * num_questions),
)
end_loss = loss_fct(
end_logits.view(batch_size * num_questions, sequence_length),
end_positions.view(batch_size * num_questions),
)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return SplinterForPreTrainingOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor:
rows, flat_positions = torch.where(input_ids == self.config.question_token_id)
num_questions = torch.bincount(rows)
positions = torch.full(
(input_ids.size(0), num_questions.max()),
self.config.pad_token_id,
dtype=torch.long,
device=input_ids.device,
)
cols = torch.cat([torch.arange(n) for n in num_questions])
positions[rows, cols] = flat_positions
return positions
__all__ = [
"SplinterForQuestionAnswering",
"SplinterForPreTraining",
"SplinterLayer",
"SplinterModel",
"SplinterPreTrainedModel",
]
| SplinterForPreTraining |
python | dagster-io__dagster | python_modules/dagster/dagster/components/utils/defs_state.py | {
"start": 262,
"end": 2033
} | class ____(Model):
key: Optional[str] = Field(
default=None, description="The key for the state. This must be unique per deployment."
)
management_type: DefsStateManagementType = Field(
description="The storage type for state required for loading this object's definitions."
" - `LOCAL_FILESYSTEM`: State is stored on the local filesystem. `dg utils refresh-defs-state` must be executed while building the deployed container image in order for state to be accessible."
" - `VERSIONED_STATE_STORAGE`: State is stored in your configured `defs_state_storage`. `dg utils refresh-defs-state` may be executed at any time to refresh the state."
" - `LEGACY_CODE_SERVER_SNAPSHOTS`: State is stored in memory in the code server. State is always automatically refreshed when the code server is loaded.",
examples=[
DefsStateManagementType.LOCAL_FILESYSTEM.value,
DefsStateManagementType.VERSIONED_STATE_STORAGE.value,
DefsStateManagementType.LEGACY_CODE_SERVER_SNAPSHOTS.value,
],
)
refresh_if_dev: bool = Field(
default=True,
description="Whether to automatically refresh defs state when using `dagster dev` or the `dg` cli.",
)
@classmethod
def local_filesystem(cls) -> "DefsStateConfigArgs":
return cls(management_type=DefsStateManagementType.LOCAL_FILESYSTEM)
@classmethod
def versioned_state_storage(cls) -> "DefsStateConfigArgs":
return cls(management_type=DefsStateManagementType.VERSIONED_STATE_STORAGE)
@classmethod
def legacy_code_server_snapshots(cls) -> "DefsStateConfigArgs":
return cls(management_type=DefsStateManagementType.LEGACY_CODE_SERVER_SNAPSHOTS)
@record
| DefsStateConfigArgs |
python | ansible__ansible | test/integration/targets/old_style_vars_plugins/vars_plugins/require_enabled.py | {
"start": 86,
"end": 238
} | class ____(BaseVarsPlugin):
REQUIRES_ENABLED = True
def get_vars(self, loader, path, entities):
return {'require_enabled': True}
| VarsModule |
python | pytorch__pytorch | torch/optim/lr_scheduler.py | {
"start": 25654,
"end": 29251
} | class ____(LRScheduler):
"""Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones.
Notice that such decay can happen simultaneously with other changes to the learning rate
from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
milestones (list): List of epoch indices. Must be increasing.
gamma (float): Multiplicative factor of learning rate decay.
Default: 0.1.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # xdoctest: +SKIP
>>> # Assuming optimizer uses lr = 0.05 for all groups
>>> # lr = 0.05 if epoch < 30
>>> # lr = 0.005 if 30 <= epoch < 80
>>> # lr = 0.0005 if epoch >= 80
>>> scheduler = MultiStepLR(optimizer, milestones=[30, 80], gamma=0.1)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
.. image:: ../scripts/lr_scheduler_images/MultiStepLR.png
"""
def __init__(
self,
optimizer: Optimizer,
milestones: Iterable[int],
gamma: float = 0.1,
last_epoch: int = -1,
) -> None: # noqa: D107
self.milestones = Counter(milestones)
self.gamma = gamma
super().__init__(optimizer, last_epoch)
@override
def get_lr(self) -> list[float | Tensor]:
r"""Compute the next learning rate for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups`.
If the current epoch is in :attr:`milestones`, decays the
``group["lr"]``\s in the optimizer's
:attr:`~torch.optim.Optimizer.param_groups` by :attr:`gamma`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
.. note::
If you're trying to inspect the most recent learning rate, use
:meth:`get_last_lr()` instead.
.. note::
The returned :class:`~torch.Tensor`\s are copies, and never alias
the optimizer's ``group["lr"]``\s.
.. note::
If the current epoch appears in :attr:`milestones` ``n`` times, we
scale by :attr:`gamma` to the power of ``n``
"""
_warn_get_lr_called_within_step(self)
if self.last_epoch not in self.milestones:
return _param_groups_val_list(self.optimizer, "lr")
return [
group["lr"] * self.gamma ** self.milestones[self.last_epoch]
for group in self.optimizer.param_groups
]
def _get_closed_form_lr(self):
r"""Compute learning rates for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups` at :attr:`last_epoch` using
a closed-form formula.
Uses :attr:`base_lrs` to compute learning rates. This method is called
when an epoch is passed to :meth:`step`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
"""
milestones = sorted(self.milestones.elements())
return [
base_lr * self.gamma ** bisect_right(milestones, self.last_epoch)
for base_lr in self.base_lrs
]
| MultiStepLR |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 33963,
"end": 34670
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LegacyDebertaV2PredictionHeadTransform(config)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(self.embedding_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
| LegacyDebertaV2LMPredictionHead |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/gcs/io_manager.py | {
"start": 6027,
"end": 8881
} | class ____(GCSPickleIOManager):
"""Renamed to GCSPickleIOManager. See GCSPickleIOManager for documentation."""
pass
@dagster_maintained_io_manager
@io_manager(
config_schema=GCSPickleIOManager.to_config_schema(),
required_resource_keys={"gcs"},
)
def gcs_pickle_io_manager(init_context):
"""Persistent IO manager using GCS for storage.
Serializes objects via pickling. Suitable for objects storage for distributed executors, so long
as each execution node has network connectivity and credentials for GCS and the backing bucket.
Assigns each op output to a unique filepath containing run ID, step key, and output name.
Assigns each asset to a single filesystem path, at ``<base_dir>/<asset_key>``. If the asset key
has multiple components, the final component is used as the name of the file, and the preceding
components as parent directories under the base_dir.
Subsequent materializations of an asset will overwrite previous materializations of that asset.
With a base directory of ``/my/base/path``, an asset with key
``AssetKey(["one", "two", "three"])`` would be stored in a file called ``three`` in a directory
with path ``/my/base/path/one/two/``.
Example usage:
1. Attach this IO manager to a set of assets.
.. code-block:: python
from dagster import Definitions, asset
from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource
@asset
def asset1():
# create df ...
return df
@asset
def asset2(asset1):
return asset1[:5]
Definitions(
assets=[asset1, asset2],
resources={
"io_manager": gcs_pickle_io_manager.configured(
{"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}
),
"gcs": gcs_resource.configured({"project": "my-cool-project"}),
},
)
2. Attach this IO manager to your job to make it available to your ops.
.. code-block:: python
from dagster import job
from dagster_gcp.gcs import gcs_pickle_io_manager, gcs_resource
@job(
resource_defs={
"io_manager": gcs_pickle_io_manager.configured(
{"gcs_bucket": "my-cool-bucket", "gcs_prefix": "my-cool-prefix"}
),
"gcs": gcs_resource.configured({"project": "my-cool-project"}),
},
)
def my_job():
...
"""
client = init_context.resources.gcs
pickled_io_manager = PickledObjectGCSIOManager(
bucket=init_context.resource_config["gcs_bucket"],
client=client,
prefix=init_context.resource_config["gcs_prefix"],
)
return pickled_io_manager
| ConfigurablePickledObjectGCSIOManager |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_param.py | {
"start": 314,
"end": 2207
} | class ____(TypedDict, total=False):
id: str
"""
The unique ID of the item, this can be generated by the client to help manage
server-side context, but is not required because the server will generate one if
not provided.
"""
arguments: str
"""The arguments of the function call (for `function_call` items)."""
call_id: str
"""
The ID of the function call (for `function_call` and `function_call_output`
items). If passed on a `function_call_output` item, the server will check that a
`function_call` item with the same ID exists in the conversation history.
"""
content: Iterable[ConversationItemContentParam]
"""The content of the message, applicable for `message` items.
- Message items of role `system` support only `input_text` content
- Message items of role `user` support `input_text` and `input_audio` content
- Message items of role `assistant` support `text` content.
"""
name: str
"""The name of the function being called (for `function_call` items)."""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`."""
output: str
"""The output of the function call (for `function_call_output` items)."""
role: Literal["user", "assistant", "system"]
"""
The role of the message sender (`user`, `assistant`, `system`), only applicable
for `message` items.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item (`completed`, `incomplete`, `in_progress`).
These have no effect on the conversation, but are accepted for consistency with
the `conversation.item.created` event.
"""
type: Literal["message", "function_call", "function_call_output"]
"""The type of the item (`message`, `function_call`, `function_call_output`)."""
| ConversationItemParam |
python | HIPS__autograd | autograd/builtins.py | {
"start": 3502,
"end": 3613
} | class ____(type_):
def __instancecheck__(self, instance):
return isinstance(instance, list_)
| ListMeta |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum1.py | {
"start": 5945,
"end": 6222
} | class ____(Enum):
a = 1
b = lambda self: None
c = func3
reveal_type(TestEnum12.a, expected_text="Literal[TestEnum12.a]")
reveal_type(TestEnum12.b, expected_text="(self: Unknown) -> None")
reveal_type(TestEnum12.c, expected_text="(self: Unknown) -> None")
| TestEnum12 |
python | crytic__slither | slither/vyper_parsing/variables/state_variable.py | {
"start": 263,
"end": 1293
} | class ____:
def __init__(self, variable: StateVariable, variable_data: VariableDecl) -> None:
self._variable: StateVariable = variable
self._variable.name = variable_data.target.id
self._variable.is_constant = variable_data.is_constant
self._variable.is_immutable = variable_data.is_immutable
self._variable.visibility = "public" if variable_data.is_public else "internal"
self._elem_to_parse = variable_data.annotation
if variable_data.value is not None:
self._variable.initialized = True
self._initializedNotParsed = variable_data.value
@property
def underlying_variable(self) -> StateVariable:
return self._variable
def analyze(self, contract) -> None:
self._variable.type = parse_type(self._elem_to_parse, contract)
if self._variable.initialized:
self._variable.expression = parse_expression(self._initializedNotParsed, contract)
self._initializedNotParsed = None
| StateVariableVyper |
python | huggingface__transformers | src/transformers/models/plbart/modeling_plbart.py | {
"start": 20130,
"end": 25195
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: PLBartConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PLBartAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PLBartAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
| PLBartDecoderLayer |
python | huggingface__transformers | src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py | {
"start": 12848,
"end": 14045
} | class ____(ModernBertRotaryEmbedding):
pass
def eager_attention_forward(
module: "ModernBertDecoderAttention",
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
sliding_window: Optional[int] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""A simple eager attention implementation for ModernBERT decoder."""
if scaling is None:
scaling = module.head_dim**-0.5
# Compute attention scores
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
# Use the pre-computed attention mask
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| ModernBertDecoderRotaryEmbedding |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 72021,
"end": 75728
} | class ____(_UnsupportedGroupCVMixin, BaseShuffleSplit):
"""Random permutation cross-validator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that test sets across all folds will be mutually exclusive,
and might include overlapping samples. However, this is still very likely for
sizeable datasets.
Read more in the :ref:`User Guide <ShuffleSplit>`.
For visualisation of cross-validation behaviour and
comparison between common scikit-learn split methods
refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0)
>>> rs.get_n_splits()
5
>>> print(rs)
ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0 4]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2 5]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4 0]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1 0]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1 0]
Test: index=[2 4]
>>> # Specify train and test size
>>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25,
... random_state=0)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1]
Test: index=[2 4]
"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.1
| ShuffleSplit |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 3177,
"end": 3406
} | class ____(GemmConfig):
"""
ROCm subclass for GEMMs, with AMD backend specific tuneable kernargs
"""
matrix_instr_nonkdim: int = 16
waves_per_eu: int = 0
kpack: int = 2
@dataclasses.dataclass
| ROCmGemmConfig |
python | getsentry__sentry | src/sentry/web/frontend/idp_email_verification.py | {
"start": 467,
"end": 2169
} | class ____(BaseView):
# the user using this endpoint is currently locked out of their account so auth isn't required.
auth_required = False
def handle(self, request: HttpRequest, key: str) -> HttpResponse:
verification_value = get_verification_value_from_key(key)
if not verification_value:
return render_to_response("sentry/idp_account_not_verified.html", request=request)
org = self._recover_org_slug(verification_value)
context = {"org": org}
if verification_value and org:
request.session[SSO_VERIFICATION_KEY] = key
user_id = verification_value.get("user_id")
if user_id:
key = f"{SSO_VERIFICATION_KEY}:{user_id}"
cache.set(key, True, timeout=300)
logger.info(
"sso.login-pipeline.verified-email-set-cache",
extra={
"user_id": user_id,
"organization_id": verification_value.get("organization_id"),
},
)
return render_to_response(
"sentry/idp_account_verified.html", context=context, request=request
)
return render_to_response("sentry/idp_account_not_verified.html", request=request)
@staticmethod
def _recover_org_slug(verification_value):
organization_id = verification_value.get("organization_id")
if organization_id is None:
return None
try:
return OrganizationMapping.objects.get(organization_id=organization_id).slug
except OrganizationMapping.DoesNotExist:
return None
| AccountConfirmationView |
python | jmcnamara__XlsxWriter | xlsxwriter/chart_title.py | {
"start": 291,
"end": 3344
} | class ____:
"""
A class to represent an Excel chart title.
This class encapsulates all title related properties and methods for the
chart title and axis titles.
"""
def __init__(self) -> None:
"""
Initialize a ChartTitle instance.
"""
self.font: Optional[Dict[str, Any]] = None
self.name: Optional[str] = None
self.formula: Optional[str] = None
self.data_id: Optional[int] = None
self.layout: Optional[Dict[str, Any]] = None
self.overlay: Optional[bool] = None
self.hidden: bool = False
self.line: Optional[Dict[str, Any]] = None
self.fill: Optional[Dict[str, Any]] = None
self.pattern: Optional[Dict[str, Any]] = None
self.gradient: Optional[Dict[str, Any]] = None
def has_name(self) -> bool:
"""
Check if the title has a text name set.
Returns:
True if name has been set.
"""
return self.name is not None and self.name != ""
def has_formula(self) -> bool:
"""
Check if the title has a formula set.
Returns:
True if formula has been set.
"""
return self.formula is not None
def has_formatting(self) -> bool:
"""
Check if the title has any formatting properties set.
Returns:
True if the title has line, fill, pattern, or gradient formatting.
"""
has_line = self.line is not None and self.line.get("defined", False)
has_fill = self.fill is not None and self.fill.get("defined", False)
has_pattern = self.pattern
has_gradient = self.gradient
return has_line or has_fill or has_pattern or has_gradient
def get_formatting(self) -> Dict[str, Any]:
"""
Get a dictionary containing the formatting properties.
Returns:
A dictionary with line, fill, pattern, and gradient properties.
"""
return {
"line": self.line,
"fill": self.fill,
"pattern": self.pattern,
"gradient": self.gradient,
}
def is_hidden(self) -> bool:
"""
Check if the title is explicitly hidden.
Returns:
True if title is hidden.
"""
return self.hidden
def __repr__(self) -> str:
"""
Return a string representation of the ChartTitle.
"""
return (
f"ChartTitle(\n"
f" name = {self.name!r},\n"
f" formula = {self.formula!r},\n"
f" hidden = {self.hidden!r},\n"
f" font = {self.font!r},\n"
f" line = {self.line!r},\n"
f" fill = {self.fill!r},\n"
f" pattern = {self.pattern!r},\n"
f" gradient = {self.gradient!r},\n"
f" layout = {self.layout!r},\n"
f" overlay = {self.overlay!r},\n"
f" has_formatting = {self.has_formatting()!r},\n"
f")\n"
)
| ChartTitle |
python | huggingface__transformers | tests/models/altclip/test_modeling_altclip.py | {
"start": 10680,
"end": 12547
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (AltCLIPTextModel,) if is_torch_available() else ()
# TODO (@SunMarc): Fix me
@unittest.skip(reason="It's broken.")
def test_resize_tokens_embeddings(self):
super().test_resize_tokens_embeddings()
def setUp(self):
self.model_tester = AltCLIPTextModelTester(self)
self.config_tester = ConfigTester(self, config_class=AltCLIPTextConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_model_outputs_equivalence(self):
pass
@unittest.skip(reason="Result of the model is a dict")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="AltCLIP does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "BAAI/AltCLIP"
model = AltCLIPTextModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| AltCLIPTextModelTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 23715,
"end": 23806
} | class ____(DagsterError):
"""Error raised by invalid asset key."""
| DagsterInvalidAssetKey |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E23.py | {
"start": 2033,
"end": 2339
} | class ____[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]:
def pep_696_bad_method[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](
self,
x:A = "foo"[::-1],
y:B = [[["foo", "bar"]]],
z:object = "fooo",
):
pass
| PEP696Bad |
python | Farama-Foundation__Gymnasium | tests/utils/test_env_checker_with_gym.py | {
"start": 620,
"end": 3207
} | class ____(gymnasium.Env):
def __init__(self):
self.action_space = gymnasium.spaces.Discrete(2)
self.observation_space = gym.spaces.Discrete(2)
def test_check_env_with_gym():
with pytest.raises(
TypeError,
match=re.escape(
"The environment must inherit from the gymnasium.Env class, actual class: <class"
),
):
check_env(NoClassEnv())
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment class to `gymnasium.Env`."
),
):
check_env(IncorrectEnv())
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment observation_space to `<class 'gymnasium.spaces.space.Space'>`."
),
):
check_env(IncorrectObs())
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment action_space to `<class 'gymnasium.spaces.space.Space'>`."
),
):
check_env(IncorrectAction())
def test_passive_env_checker_with_gym():
gymnasium.register("NoClassEnv", NoClassEnv)
gymnasium.register("IncorrectEnv", IncorrectEnv)
gymnasium.register("IncorrectObs", IncorrectObs)
gymnasium.register("IncorrectAction", IncorrectAction)
with pytest.raises(
TypeError,
match=re.escape(
"The environment must inherit from the gymnasium.Env class, actual class: <class"
),
):
gymnasium.make("NoClassEnv")
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment class to `gymnasium.Env`."
),
):
gymnasium.make("IncorrectEnv")
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment observation_space to `<class 'gymnasium.spaces.space.Space'>`."
),
):
gymnasium.make("IncorrectObs")
with pytest.raises(
TypeError,
match=re.escape(
"Gym is incompatible with Gymnasium, please update the environment action_space to `<class 'gymnasium.spaces.space.Space'>`."
),
):
gymnasium.make("IncorrectAction")
gymnasium.registry.pop("NoClassEnv")
gymnasium.registry.pop("IncorrectEnv")
gymnasium.registry.pop("IncorrectObs")
gymnasium.registry.pop("IncorrectAction")
| IncorrectObs |
python | huggingface__transformers | src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py | {
"start": 5567,
"end": 8082
} | class ____(nn.Module):
"""
Duration predictor module.
This is a module of duration predictor described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
Speech' https://huggingface.co/papers/1905.09263 The duration predictor predicts a duration of each frame in log domain
from the hidden embeddings of encoder.
Note:
The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`, the
outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
"""
def __init__(self, config: FastSpeech2ConformerConfig):
super().__init__()
self.conv_layers = nn.ModuleList()
self.log_domain_offset = 1.0
for layer_idx in range(config.duration_predictor_layers):
num_chans = config.duration_predictor_channels
input_channels = config.hidden_size if layer_idx == 0 else num_chans
layer = FastSpeech2ConformerPredictorLayer(
input_channels,
num_chans,
config.duration_predictor_kernel_size,
config.duration_predictor_dropout_rate,
)
self.conv_layers.append(layer)
self.linear = nn.Linear(config.duration_predictor_channels, 1)
def forward(self, encoder_hidden_states):
"""
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
Batch of input sequences.
padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
Batch of masks indicating padded part.
Returns:
`torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
"""
# (batch_size, input_dim, max_text_length)
hidden_states = encoder_hidden_states.transpose(1, -1)
for layer in self.conv_layers:
hidden_states = layer(hidden_states)
# NOTE: calculate in log domain, (batch_size, max_text_length)
hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)
if not self.training:
# NOTE: calculate in linear domain
hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()
return hidden_states
# Copied from transformers.models.speecht5.modeling_speecht5.SpeechT5BatchNormConvLayer
| FastSpeech2ConformerDurationPredictor |
python | joke2k__faker | faker/providers/person/nl_NL/__init__.py | {
"start": 44,
"end": 32748
} | class ____(PersonProvider):
# conforming to http://nl.wikipedia.org/wiki/Achternaam#Naamswijziging and
# http://en.wikipedia.org/wiki/Dutch_name#Dutch_naming_law_.28surnames.29
# by adding a "-" between the two last names when someone is married
formats = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
)
first_names_male = (
"Aaron",
"Abel",
"Adam",
"Aiden",
"Alex",
"Alexander",
"Ali",
"Amin",
"Amir",
"Arie",
"Aron",
"Arthur",
"Ayden",
"Ayoub",
"Bart",
"Bas",
"Bastiaan",
"Beau",
"Ben",
"Benjamin",
"Berat",
"Berend",
"Bilal",
"Bjorn",
"Boaz",
"Boris",
"Bradley",
"Bram",
"Brent",
"Brian",
"Bryan",
"Cas",
"Casper",
"Chris",
"Colin",
"Collin",
"Cornelis",
"Daan",
"Damian",
"Dani",
"Daniel",
"Daniël",
"Dave",
"David",
"Dean",
"Dex",
"Dion",
"Dirk",
"Duuk",
"Dylan",
"Dylano",
"Elias",
"Emir",
"Faas",
"Fabian",
"Fedde",
"Felix",
"Finn",
"Florian",
"Floris",
"Gerrit",
"Giel",
"Gijs",
"Giovanni",
"Guus",
"Hamza",
"Hendrik",
"Hidde",
"Hugo",
"Ian",
"Ibrahim",
"Imran",
"Ivan",
"Jack",
"Jacob",
"Jake",
"James",
"Jamie",
"Jan",
"Jari",
"Jason",
"Jasper",
"Jay",
"Jayden",
"Jayson",
"Jelle",
"Jelte",
"Jens",
"Jesper",
"Jesse",
"Jim",
"Jip",
"Job",
"Joep",
"Joey",
"Johannes",
"Jonas",
"Jonathan",
"Joost",
"Jordy",
"Joris",
"Jorn",
"Jort",
"Joshua",
"Joël",
"Jules",
"Julian",
"Julius",
"Jurre",
"Justin",
"Kai",
"Kay",
"Keano",
"Kevin",
"Kian",
"Kick",
"Koen",
"Kyan",
"Kyano",
"Lars",
"Laurens",
"Lenn",
"Leon",
"Levi",
"Lex",
"Liam",
"Loek",
"Lorenzo",
"Luc",
"Luca",
"Lucas",
"Luka",
"Lukas",
"Luke",
"Luuk",
"Maarten",
"Mads",
"Marijn",
"Marinus",
"Mark",
"Mart",
"Mason",
"Mathijs",
"Mats",
"Matthias",
"Matthijs",
"Maurits",
"Max",
"Maxim",
"Mees",
"Mehmet",
"Melle",
"Merijn",
"Micha",
"Michael",
"Mick",
"Mika",
"Mike",
"Milan",
"Milo",
"Mohamed",
"Mohammed",
"Morris",
"Muhammed",
"Mustafa",
"Nathan",
"Naud",
"Nick",
"Niek",
"Niels",
"Noah",
"Noud",
"Nout",
"Olaf",
"Olivier",
"Oscar",
"Owen",
"Pepijn",
"Philip",
"Pieter",
"Pim",
"Quinn",
"Quinten",
"Raf",
"Rafael",
"Ravi",
"Rayan",
"Rens",
"Rick",
"Rik",
"Riley",
"Roan",
"Robin",
"Rowan",
"Ruben",
"Ryan",
"Sam",
"Sami",
"Samuel",
"Sander",
"Sebastiaan",
"Sem",
"Senn",
"Senna",
"Sep",
"Sepp",
"Seth",
"Siem",
"Sil",
"Simon",
"Sjoerd",
"Stan",
"Stef",
"Stefan",
"Sten",
"Stijn",
"Sven",
"Teun",
"Thijmen",
"Thijn",
"Thijs",
"Thom",
"Thomas",
"Ties",
"Tijmen",
"Tijn",
"Tijs",
"Tim",
"Timo",
"Tobias",
"Tom",
"Tristan",
"Twan",
"Tycho",
"Tygo",
"Tyler",
"Valentijn",
"Victor",
"Vigo",
"Vince",
"Vincent",
"Wesley",
"Wessel",
"Willem",
"Wout",
"Wouter",
"Xavi",
"Yassin",
"Youssef",
"Yusuf",
"Zakaria",
)
first_names_female = (
"Aaliyah",
"Adriana",
"Aimée",
"Alicia",
"Alyssa",
"Amber",
"Amelia",
"Amina",
"Amira",
"Amy",
"Amélie",
"Angelina",
"Anna",
"Annabel",
"Anne",
"Annemijn",
"Anouk",
"Ashley",
"Aya",
"Aylin",
"Azra",
"Bente",
"Benthe",
"Bibi",
"Bo",
"Britt",
"Carlijn",
"Catharina",
"Cato",
"Ceylin",
"Charlotte",
"Chloé",
"Chloë",
"Cornelia",
"Dana",
"Danique",
"Daphne",
"Demi",
"Dewi",
"Dina",
"Ecrin",
"Elena",
"Elif",
"Elin",
"Eline",
"Elisa",
"Elisabeth",
"Elise",
"Eliza",
"Elizabeth",
"Elize",
"Ella",
"Emily",
"Emma",
"Esila",
"Esmee",
"Esmée",
"Esther",
"Eva",
"Evelien",
"Evi",
"Evie",
"Evy",
"Fabiënne",
"Fatima",
"Fay",
"Faye",
"Feline",
"Fem",
"Femke",
"Fenna",
"Fenne",
"Fien",
"Fiene",
"Fleur",
"Floor",
"Floortje",
"Frederique",
"Féline",
"Guusje",
"Hailey",
"Hanna",
"Hannah",
"Helena",
"Ilse",
"Imke",
"Inaya",
"Indy",
"Iris",
"Isa",
"Isabel",
"Isabella",
"Isabelle",
"Ise",
"Isis",
"Ivy",
"Ize",
"Jade",
"Janna",
"Janne",
"Jasmijn",
"Jayda",
"Jaylinn",
"Jenna",
"Jennifer",
"Jente",
"Jet",
"Jill",
"Jinthe",
"Johanna",
"Jolie",
"Jolijn",
"Josephine",
"Joy",
"Joëlle",
"Julia",
"Julie",
"Juliette",
"Juul",
"Karlijn",
"Kate",
"Kaylee",
"Kayleigh",
"Kiki",
"Kim",
"Kyara",
"Kyra",
"Lana",
"Lara",
"Laura",
"Lauren",
"Leah",
"Lena",
"Lieke",
"Lieve",
"Lily",
"Lina",
"Linde",
"Lindsey",
"Linn",
"Lisa",
"Lisanne",
"Lise",
"Liv",
"Livia",
"Liz",
"Liza",
"Lize",
"Lizz",
"Lizzy",
"Loes",
"Lois",
"Lola",
"Lot",
"Lotte",
"Louise",
"Loïs",
"Lucy",
"Luna",
"Lynn",
"Maaike",
"Maartje",
"Madelief",
"Maja",
"Mara",
"Mare",
"Maria",
"Marit",
"Maryam",
"Maud",
"Maya",
"Megan",
"Meike",
"Melissa",
"Merel",
"Merle",
"Mette",
"Mia",
"Michelle",
"Mila",
"Milou",
"Mirte",
"Mirthe",
"Myrthe",
"Nadia",
"Nadine",
"Naomi",
"Nienke",
"Nikki",
"Nina",
"Ninthe",
"Nisa",
"Noa",
"Noor",
"Noortje",
"Nora",
"Norah",
"Nova",
"Noëlle",
"Nynke",
"Olivia",
"Phileine",
"Pien",
"Pippa",
"Pleun",
"Puck",
"Puk",
"Quinty",
"Renske",
"Robin",
"Romy",
"Roos",
"Rosa",
"Rosalie",
"Saar",
"Sam",
"Sanne",
"Sara",
"Sarah",
"Selena",
"Selina",
"Senna",
"Sienna",
"Silke",
"Sofia",
"Sofie",
"Sophia",
"Sophie",
"Stella",
"Sterre",
"Suus",
"Suze",
"Sylvie",
"Tara",
"Tess",
"Tessa",
"Tirza",
"Vajèn",
"Valerie",
"Veerle",
"Vera",
"Victoria",
"Yara",
"Yasmin",
"Yasmine",
"Yfke",
"Yinthe",
"Zara",
"Zeynep",
"Zoey",
"Zoë",
)
first_names = first_names_male + first_names_female
last_names = (
"'s Gravensande",
"Aalts",
"Aarden",
"Aarts",
"Adelaar",
"Adriaansen",
"Adriaensdr",
"Adriaense",
"Adryaens",
"Aeije",
"Aelftrud van Wessex",
"Aertsz",
"Alpaidis",
"Amalrada",
"Ansems",
"Appelman",
"Arens",
"Arent",
"Ariens",
"Ariens Ansems",
"Arnold",
"Arts",
"Aschman",
"Backer",
"Bakker",
"Barents",
"Bartels",
"Bastiaanse",
"Bastiaense",
"Bave",
"Becht",
"Beekman",
"Beernink",
"Beijring",
"Bekbergen",
"Bellemans",
"Belpere",
"Beourgeois",
"Berends",
"Berendse",
"Bernaards",
"Bertho",
"Bezemer",
"Bierstraten",
"Bijlsma",
"Billung",
"Blaak",
"Blees",
"Bleijenberg",
"Blewanus",
"Bloemendaal",
"Blokland",
"Blom",
"Blom",
"Blonk",
"Boddaugh",
"Boer",
"Boer",
"Boers",
"Boeser",
"Boetet",
"Bolkesteijn",
"Booden",
"Boogaerts",
"Borman",
"Bos",
"Bos",
"Bosch",
"Bosch",
"Bosman",
"Boudewijns",
"Bouhuizen",
"Bourgondië, van",
"Bouthoorn",
"Bouwhuisen",
"Brandon",
"Brands",
"Brandt",
"Bresse",
"Bresé",
"Breugelensis",
"Briere",
"Brievingh",
"Brisee",
"Brizee",
"Broeckx",
"Broeders",
"Broek",
"Broekhoven",
"Broeshart",
"Bronder",
"Brouwer",
"Brouwer",
"Bruggeman",
"Brugman",
"Bruijne van der Veen",
"Brumleve",
"Bruynzeels",
"Bud",
"Buijs",
"Butselaar",
"Bökenkamp",
"Cadefau",
"Cammel",
"Cant",
"Carnotte",
"Charon",
"Chevresson",
"Chotzen",
"Chrodtrud",
"Claassen",
"Claesdr",
"Claesner",
"Coenen",
"Coolen",
"Coret",
"Coret-Coredo",
"Coreth von und zu Coredo und Starkenberg",
"Cornelisse",
"Cornelissen",
"Cornelisz",
"Corstiaens",
"Cosman",
"Courtier",
"Dachgelder",
"Dachgeldt",
"Dachgelt",
"David",
"Dekker",
"Dekker",
"Demmendaal",
"Dennenberg",
"Die Bont",
"Diesbergen",
"Dijkman",
"Dijkstra",
"Dircken",
"Dirksen",
"Dirven",
"Doesburg",
"Doorhof",
"Doornhem",
"Dorsman",
"Doyle",
"Draaisma",
"Dries",
"Driessen",
"Drysdale",
"Dubois",
"Duivenvoorden",
"Eckhardt",
"Eelman",
"Eerden",
"Ehlert",
"Eijkelboom",
"Elberts",
"Elbertse",
"Ellis",
"Elsemulder",
"Elsenaar",
"Emmen",
"Engels",
"Erhout",
"Ernst",
"Estey",
"Everde",
"Evers",
"Everts",
"Fechant",
"Feenstra",
"Feltzer",
"Ferran",
"Fiere",
"Flink",
"Fortuyn",
"Frankhuizen",
"François",
"Françoise",
"Fredriks",
"Fremie",
"Frerichs",
"Freshour",
"Friehus",
"Furda",
"Galenzone",
"Galijn",
"Garret",
"Geerling",
"Geerts",
"Geertsen",
"Geldens",
"Gellemeyer",
"Gemen",
"Geneart",
"Genefaas",
"Gepa van Bourgondië",
"Gerrits",
"Gerritse",
"Gerritsen",
"Gervais",
"Ghoerle",
"Giselmeyer",
"Glasses",
"Gnodde",
"Goderts",
"Godfrey van Alemannië",
"Goedhart",
"Goudriaan",
"Govarts",
"Goyaerts van Waderle",
"Greij",
"Groen",
"Groenendaal",
"Groenestein",
"Grondel",
"Groote",
"Gruijl",
"Guit",
"Haack",
"Haengreve",
"Hagendoorn",
"Hak",
"Hakker",
"Haneberg",
"Hanegraaff",
"Haring",
"Haselaar",
"Hazenveld",
"Heere",
"Heerkens",
"Heerschop",
"Hehl",
"Heijman",
"Heijmans",
"Heijmen",
"Heinrichs",
"Hekker",
"Hellevoort",
"Helmerhorst",
"Hemma van Allemanië",
"Hendricks",
"Hendriks",
"Hendriks",
"Hendrikse",
"Henric van den Nuwenhuse",
"Heribert van Laon",
"Hermans",
"Hermans",
"Hexspoor",
"Heymans",
"Heyne",
"Hoedemakers",
"Hoeks",
"Hoekstra",
"Hoelen",
"Hoes",
"Hofman",
"Hollander",
"Holthuis",
"Hondeveld",
"Honing",
"Hoogers",
"Hoppenbrouwer",
"Horrocks",
"Houdijk",
"Huberts",
"Huel",
"Huijben",
"Huijbrechts",
"Huijs",
"Huijzing",
"Huisman",
"Huisman",
"Huls",
"Hulshouts",
"Hulskes",
"Hulst",
"Huurdeman",
"Höning",
"Jaceps",
"Jacobi",
"Jacobs",
"Jacobs",
"Jacquot",
"Jans",
"Jansdr",
"Janse",
"Jansen",
"Jansen",
"Jansen",
"Jansse",
"Janssen",
"Janssen",
"Janssens",
"Jdotte",
"Jeggij",
"Jekel",
"Jerusalem",
"Jochems",
"Jones",
"Jonker",
"Jonkman",
"Joosten",
"Jorlink",
"Jorrisen",
"Jurrijens",
"Kallen",
"Kalman",
"Kamp",
"Kamper",
"Karels",
"Kas",
"Kathagen",
"Keijser",
"Keijzer",
"Keltenie",
"Kerkhof",
"Ketel",
"Ketting",
"Kirpenstein",
"Kisman",
"Kleibrink",
"Kleijse",
"Klein",
"Klerks",
"Kleybrink",
"Klomp Jan",
"Kloppert",
"Knoers",
"Knuf",
"Koeman",
"Kof",
"Kok",
"Kok",
"Kolen",
"Kolster",
"Koning",
"Konings",
"Koret",
"Korsman",
"Korstman",
"Kort",
"Kortman",
"Kosten",
"Koster",
"Koster",
"Krabbe",
"Kramer",
"Kremer",
"Kriens",
"Kronenberg",
"Kruns",
"Kuijpers",
"Kuijpers",
"Kuilenburg",
"Kuiper",
"Kuipers",
"Kuit",
"Kunen",
"Kwaadland",
"Köster",
"Labado",
"Laffray",
"Lafleur",
"Lage",
"Lagerweij",
"Lambers",
"Lambregt",
"Lamore",
"Lamotte",
"Langevoort",
"Lankle",
"Lansink",
"Lathrope",
"Latier",
"Le Grand",
"Le Marec",
"Leene",
"Leguit",
"Lelijveld",
"Lemmens",
"Lensen",
"Lether",
"Levesque",
"Lieshout",
"Ligtvoet",
"Lijn",
"Lind",
"Linschoten",
"Lips",
"Loep",
"Lommert",
"Lonen",
"Loreal",
"Lorreijn",
"Louws",
"Luboch",
"Lucas",
"Luitgardis van Neustrië",
"Luster",
"Lutterveld",
"Maas",
"Maas",
"Maaswinkel",
"Mahieu",
"Mallien",
"Mangel",
"Manne",
"Mansveld",
"Mansvelt",
"Marceron",
"Marchal",
"Marchand",
"Martel",
"Martens",
"Martens",
"Massa",
"Mater",
"Mathieu",
"Mathol",
"Mathurin",
"Matthews",
"Meeres",
"Meeusen",
"Meijer",
"Meijer",
"Meis",
"Melet",
"Mens",
"Mercks",
"Merckx",
"Merkx",
"Meyer",
"Meyer",
"Michiels",
"Michielsen",
"Middelkoop",
"Mijsberg",
"Miltenburg",
"Miner",
"Moenen",
"Moensendijk",
"Moet",
"Mol",
"Mol",
"Molegraaf",
"Molen",
"Molenaar",
"Momberg",
"Mosley",
"Mudden",
"Muijs",
"Mulder",
"Mulder",
"Mulders",
"Muller",
"Nedermeijer",
"Nek",
"Neuteboom",
"Neuzerling",
"Niermann",
"Nieuwstraten",
"Nihoe",
"Nijman",
"Nollee",
"Noordijk",
"Oda",
"Oemencs",
"Oennen",
"Olthof",
"Olykan",
"Ooms",
"Oosterhek",
"Oosterhout",
"Oostveen",
"Opmans",
"Osterhoudt",
"Otte",
"Otto",
"Oude Heer",
"Ouwel",
"Ouwerkerk",
"Overdijk",
"Overeem",
"Oversteeg",
"Paillet",
"Palman",
"Pasman",
"Passchiers",
"Pastoors",
"Pauwels",
"Peeters",
"Perck",
"Perkins",
"Peronne",
"Perrono",
"Persijn",
"Peters",
"Peterse",
"Phillipsen",
"Pierson",
"Pieters",
"Pieters van der Maes",
"Pison",
"Poncelet",
"Ponci",
"Pons",
"Post",
"Post",
"Postma",
"Potters",
"Pratt",
"Prins",
"Prinsen",
"Puig",
"Rackham",
"Rademaker",
"Ramaker",
"Recer",
"Recers",
"Rehorst",
"Reijers",
"Reimes",
"Rek",
"Remmers",
"Ridder",
"Riem",
"Rietveld",
"Rijcken",
"Rijks",
"Rijn",
"Rijntjes",
"Rippey",
"Risma",
"Robbrechts Bruijne",
"Roessink",
"Romijn",
"Roodesteijn",
"Room",
"Roose",
"Roosenboom",
"Rotteveel",
"Roukes",
"Rousselet",
"Rouwenhorst",
"Rouwhorst",
"Rubben",
"Ruijs",
"Rutten",
"Salet",
"Sam",
"Sanders",
"Sanders",
"Sarneel",
"Sas",
"Saxo",
"Scardino",
"Schagen",
"Schakelaar",
"Scharroo",
"Schatteleijn",
"Scheer",
"Scheffers",
"Schellekens",
"Schelvis",
"Schenk",
"Schenkel",
"Scherms",
"Schiffer",
"Schilt",
"Schipper",
"Schokman",
"Scholten",
"Scholten",
"Schotte",
"Schouten",
"Schrant",
"Schrik",
"Schroeff",
"Schulten",
"Schuurmans",
"Schuylenborch",
"Schwartsbach",
"Scuylenborchs",
"Segerszoen",
"Serra",
"Sestig",
"Shupe",
"Simonis",
"Simons",
"Sire",
"Sitters",
"Slaetsdochter",
"Slagmolen",
"Slingerland",
"Smeets",
"Smit",
"Smit",
"Smith",
"Smits",
"Smits",
"Soos",
"Spaan",
"Spanhaak",
"Speijer",
"Spier",
"Spies",
"Spiker",
"Spreeuw",
"Sprong",
"Spruit",
"Spruyt",
"Stamrood",
"Stange",
"Steenbakkers",
"Steenbeek",
"Steinmeiern",
"Sterkman",
"Stettyn",
"Stichter",
"Stinis",
"Stoffel",
"Stoffelsz",
"Stook",
"Strijker",
"Strik",
"Stuivenberg",
"Suijker",
"Symons",
"Takkelenburg",
"Tammerijn",
"Tamsma",
"Terry",
"Teunissen",
"Texier",
"Thatcher",
"The Elder",
"Thomas",
"Thout",
"Tielemans",
"Tillmanno",
"Timmerman",
"Timmermans",
"Timmermans",
"Tins",
"Tirie",
"Totwiller",
"Tuithof",
"Uit de Willigen",
"Uittenbosch",
"Ulrich",
"Unruoch Hunerik",
"Uphaus",
"Uphuis",
"Uphus",
"VI",
"Vaessen",
"Vallenduuk",
"Van Bragt",
"Vandenbergh",
"Vastenhouw",
"Veenendaal",
"Veenstra",
"Vegt",
"Velderman",
"Veltman",
"Verbeeck",
"Verbeek",
"Verbeek",
"Verboom",
"Verbruggen",
"Verda",
"Vergeer",
"Verhaar",
"Verhagen",
"Verharen",
"Verheij",
"Verheuvel",
"Verhoeven",
"Verhoeven",
"Verkade",
"Vermeulen",
"Vermeulen",
"Verschuere",
"Verschut",
"Versluijs",
"Vertoor",
"Vertooren",
"Vervoort",
"Verwoert",
"Vial",
"Vierdag",
"Vignon",
"Vink",
"Visser",
"Volcke",
"Voortman",
"Vos",
"Vos",
"Vrancken",
"Waardeloo",
"Wagenvoort",
"Walsteijn",
"Walter",
"Waltrade Walderade",
"Weeldenburg",
"Weerdenburg",
"Weijland",
"Weijters",
"Welf",
"Wendt",
"Wensen",
"Werdes",
"Werl-Arnsberg, van",
"West-Francië, van",
"Westerbeek",
"Westerburg",
"Westermann",
"Wever",
"Weyland",
"Weylant",
"Wigman",
"Wijland",
"Wilcken",
"Wildschut",
"Willems",
"Willems",
"Willems van Lier",
"Willemsen",
"Willemsen",
"Wilmont",
"Wilson",
"Winnrich",
"Winters",
"Wipstrik",
"Wolffel",
"Wolfsdr",
"Wolfswinkel",
"Wolters",
"Wolters",
"Wolzak",
"Wooning",
"Woudenberg",
"Wouters",
"Wouters van Eijndhoven",
"Woutersz",
"Wright",
"Wunderink",
"Wutke",
"Zaal",
"Zeemans",
"Zeldenrust",
"Zevenboom",
"Zijlemans",
"Zijlmans",
"Zuidweg",
"Zuijdveld",
"Zwart",
"Zwijsen",
"d' Heripon",
"de Backer",
"de Beer",
"de Bock",
"de Boer",
"de Boer",
"de Bont",
"de Bruijn",
"de Bruijn",
"de Bruin",
"de Bruin",
"de Bruyn",
"de Graaf",
"de Graaf",
"de Gratie",
"de Groot",
"de Groot",
"de Grote",
"de Gruijl",
"de Gruijter",
"de Gruil",
"de Grunt",
"de Gruson",
"de Haan",
"de Haas",
"de Heer",
"de Hoog",
"de Hoogh",
"de Jager",
"de Jode Vastraedsd",
"de Jong",
"de Jong",
"de Jonge",
"de Kale",
"de Keijser",
"de Keijzer",
"de Kok",
"de Koning",
"de Koning",
"de Korte",
"de Lange",
"de Leeuw",
"de Man",
"de Marduras",
"de Mol",
"de Nijs",
"de Pauw",
"de Plantard",
"de Reede",
"de Roo",
"de Roos",
"de Ruiter",
"de Smit",
"de Strigter",
"de Swart",
"de Vos",
"de Vries",
"de Vries",
"de Vroege",
"de Vrome",
"de Werd",
"de Wit",
"de Wit",
"de la Fleche",
"den Buytelaar",
"den Haag",
"den Teuling",
"der Kijnder",
"die Bont",
"die Pelser",
"die Witte",
"le Briel",
"le Floch",
"le Gallen",
"le Guellec",
"le Gulcher",
"le Luc",
"le Matelot",
"ter Waarbeek",
"van 't Erve",
"van 't Houteveen",
"van 't Riet",
"van 't Wel",
"van Alenburg",
"van Allemanië",
"van Amstel",
"van Arkel",
"van Arnsberg",
"van Asten",
"van Baalen",
"van Beaumont",
"van Beeck",
"van Beeck Beeckmans",
"van Beek",
"van Beek",
"van Beieren",
"van Bentheim",
"van Bergen",
"van Berkel",
"van Berkum",
"van Bernicia",
"van Boulogne",
"van Boven",
"van Bovene",
"van Bovenen",
"van Brenen",
"van Breugel",
"van Breukeleveen",
"van Breukelveen",
"van Bruchem",
"van Brunswijk",
"van Bunschoten",
"van Buuren",
"van Clootwijck",
"van Cuijck",
"van Daal",
"van Dagsburg",
"van Dalem",
"van Dam",
"van Dam",
"van Dijk",
"van Dijk",
"van Dillen",
"van Dokkum",
"van Dommelen",
"van Dongen",
"van Dongen",
"van Dooren",
"van Doorn",
"van Drenthe",
"van Duivenvoorde",
"van Duvenvoirde",
"van Duyvenvoorde",
"van Eck",
"van Egisheim",
"van Embden",
"van Emmelen",
"van Engeland",
"van Engelen",
"van Enschot",
"van Es",
"van Este",
"van Evelingen",
"van Formbach",
"van Gastel",
"van Geenen",
"van Geest",
"van Geffen",
"van Gelder",
"van Gemert",
"van Gent",
"van Ghoerle",
"van Gils",
"van Ginkel",
"van Ginneke",
"van Goerle",
"van Gorp",
"van Grinsven",
"van Grondelle",
"van Haarlem",
"van Haeften",
"van Hagen",
"van Ham",
"van Hamaland",
"van Haspengouw",
"van Haspengouw Hesbaye",
"van Hemert",
"van Henegouwen",
"van Herstal",
"van Heusden",
"van Hoevel en van Zwindrecht",
"van Holland",
"van Hostaden",
"van Hulten",
"van Jumiège",
"van Kasteelen",
"van Kempen",
"van Klaarwater",
"van Kuijc",
"van Kuijc van Malsen",
"van Kusen",
"van Laar",
"van Laarhoven",
"van Landen",
"van Laon",
"van Leeuwen",
"van Leeuwen",
"van Leuven",
"van Liendert",
"van Limburg",
"van Loon",
"van Loon",
"van Lucel",
"van Luin",
"van Luinenburg",
"van Luxemburg",
"van Luyssel",
"van Maaren",
"van Maasgouw",
"van Mare",
"van Metz",
"van Mil",
"van Mispelen",
"van Mook",
"van Munster",
"van Nederlotharingen",
"van Nes",
"van Nimwegen",
"van Noordeloos",
"van Noort",
"van Northeim",
"van Nus",
"van Ochten",
"van Oirschot",
"van Olst",
"van Ommeren",
"van Ooste",
"van Oosten",
"van Oostendorp",
"van Ooyen",
"van Opper-Lotharingen",
"van Orleans",
"van Oudewater",
"van Parijs",
"van Poppel",
"van Praagh",
"van Rheineck",
"van Riet",
"van Rijnsbergen",
"van Rijthoven",
"van Roijen",
"van Rooij",
"van Rossum",
"van Saksen",
"van Salm",
"van Salmen",
"van Santen",
"van Schevinghuizen",
"van Schweinfurt",
"van Soest",
"van Spreeuwel",
"van Spreuwel",
"van Straaten",
"van Stralen",
"van Suinvorde",
"van Susa",
"van Tours",
"van Tuijl",
"van Veen",
"van Velthoven",
"van Velzen",
"van Venrooy",
"van Verdun",
"van Vermandois",
"van Vlaanderen",
"van Vliet",
"van Voorhout",
"van Voorst",
"van Waas",
"van Wallaert",
"van Wassenaar",
"van Wel",
"van Wessex",
"van Westfalen",
"van Wickerode",
"van Wijk",
"van Wijland",
"van Zwaben",
"van de Berg",
"van de Biesenbos",
"van de Biezenbos",
"van de Brink",
"van de Coterlet",
"van de Darnau",
"van de Eerenbeemt",
"van de Elzas",
"van de Greef",
"van de Klashorst",
"van de Kooij",
"van de Leemput",
"van de Noordmark",
"van de Pavert",
"van de Plas",
"van de Pol",
"van de Veen",
"van de Velde",
"van de Velden",
"van de Ven",
"van de Ven",
"van de Wal",
"van de Water",
"van de Weterink",
"van de Wiel",
"van den Assem",
"van den Berg",
"van den Berg",
"van den Bergh",
"van den Bosch",
"van den Brand",
"van den Brink",
"van den Brink",
"van den Broek",
"van den Broek",
"van den Corput",
"van den Eerenbeemt",
"van den Eijssel",
"van den Henst",
"van den Heuvel",
"van den Hoek",
"van den Nieuwenhuijsen",
"van den Nuwenhijsen",
"van den Nuwenhuijzen",
"van den Nuwenhuysen",
"van den Nyeuwenhuysen",
"van den Oever",
"van den Pol",
"van den Velde",
"van den Velden",
"van den Wittenboer",
"van der Avoirt",
"van der Berg",
"van der Brink",
"van der Flaas",
"van der Heiden",
"van der Heijden",
"van der Heijden",
"van der Heyden",
"van der Hoeven",
"van der Horst",
"van der Horst",
"van der Kaay",
"van der Kint",
"van der Klein",
"van der Klijn",
"van der Laan",
"van der Laar",
"van der Laarse",
"van der Lede",
"van der Leek",
"van der Linden",
"van der Linden",
"van der Loo",
"van der Maath",
"van der Maes",
"van der Mast",
"van der Meer",
"van der Meulen",
"van der Noot",
"van der Plas",
"van der Ploeg",
"van der Pluijm",
"van der Pol",
"van der Pouw",
"van der Sande",
"van der Schuijt",
"van der Sloot",
"van der Smeede",
"van der Spaendonc",
"van der Spaendonck",
"van der Stael",
"van der Stael de Jonge",
"van der Steen",
"van der Strigt",
"van der Veen",
"van der Veiver",
"van der Velde",
"van der Velden",
"van der Ven",
"van der Wal",
"van der Zijl",
"van het Heerenveen",
)
| Provider |
python | huggingface__transformers | src/transformers/distributed/configuration_utils.py | {
"start": 711,
"end": 4425
} | class ____:
"""
Base class for distributed configs
"""
enable_expert_parallel: bool = False
# TODO: add tp_plan, pp_plan, device_mesh etc..
@classmethod
def from_dict(cls, config_dict, **kwargs):
"""
Constructs a DistributedConfig instance from a dictionary of parameters.
Args:
config_dict (Dict[str, Any]): Dictionary containing configuration parameters.
**kwargs: Additional keyword arguments to override dictionary values.
Returns:
DistributedConfig: Instance of DistributedConfig constructed from the dictionary.
"""
config = cls(**config_dict)
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
return config
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.to_json_file
def to_json_file(self, json_file_path: str | os.PathLike):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (`bool`, *optional*, defaults to `True`):
If set to `True`, only the difference between the config instance and the default
`QuantizationConfig()` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
config_dict = self.to_dict()
json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
writer.write(json_string)
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return copy.deepcopy(self.__dict__)
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__iter__
def __iter__(self):
"""allows `dict(obj)` for situations where obj may be a dict or QuantizationConfigMixin"""
for attr, value in copy.deepcopy(self.__dict__).items():
yield attr, value
# Copied from transformers.utils.quantization_config.QuantizationConfigMixin.__repr__
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
def to_json_string(self):
"""
Serializes this instance to a JSON formatted string.
Returns:
str: JSON formatted string representing the configuration instance.
"""
return json.dumps(self.__dict__, indent=2) + "\n"
def update(self, **kwargs):
"""
Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
returning all the unused kwargs.
Args:
kwargs (`Dict[str, Any]`):
Dictionary of attributes to tentatively update this class.
Returns:
`Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
"""
to_remove = []
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
to_remove.append(key)
# Remove all the attributes that were updated, without modifying the input dict
unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
return unused_kwargs
| DistributedConfig |
python | jd__tenacity | tenacity/retry.py | {
"start": 1604,
"end": 1813
} | class ____(retry_base):
"""Retry strategy that always rejects any result."""
def __call__(self, retry_state: "RetryCallState") -> bool:
return True
retry_always = _retry_always()
| _retry_always |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 173505,
"end": 184428
} | class ____(test.TestCase):
def test_indicator_column(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
indicator_a = fc.indicator_column(a)
self.assertEqual(indicator_a.categorical_column.name, 'a')
self.assertEqual(indicator_a.name, 'a_indicator')
self.assertEqual(indicator_a.variable_shape, [1, 4])
self.assertTrue(indicator_a._is_v2_column)
b = fc_old._categorical_column_with_hash_bucket('b', hash_bucket_size=100)
indicator_b = fc.indicator_column(b)
self.assertEqual(indicator_b.categorical_column.name, 'b')
self.assertEqual(indicator_b.name, 'b_indicator')
self.assertEqual(indicator_b.variable_shape, [1, 100])
self.assertFalse(indicator_b._is_v2_column)
def test_not_categorical_input(self):
with self.assertRaisesRegex(ValueError, 'Unsupported input type.'):
fc.indicator_column('aaa')
def test_1D_shape_succeeds(self):
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = fc.FeatureTransformationCache({
'animal': ['fox', 'fox']
})
output = transformation_cache.get(animal, None)
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
self.evaluate(output))
def test_2D_shape_succeeds(self):
# TODO(ispir/cassandrax): Switch to categorical_column_with_keys when ready.
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0]],
values=['fox', 'fox'],
dense_shape=[2, 1])
})
output = transformation_cache.get(animal, None)
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
self.evaluate(output))
def test_multi_hot(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
})
output = transformation_cache.get(animal, None)
self.assertAllEqual([[0., 2., 0., 0.]], self.evaluate(output))
def test_multi_hot2(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = fc.FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
})
output = transformation_cache.get(animal, None)
self.assertAllEqual([[0., 1., 1., 0.]], self.evaluate(output))
def test_deep_copy(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
column = fc.indicator_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.categorical_column.name, 'a')
self.assertEqual(column.name, 'a_indicator')
self.assertEqual(column.variable_shape, [1, 4])
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
data = example_pb2.Example(
features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a_indicator]))
self.assertIn('aaa', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]), self.evaluate(features['aaa']))
def test_transform(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
features = {
'aaa':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}
indicator_tensor = fc._transform_features_v2(features, [a_indicator],
None)[a_indicator]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual([[0, 0, 1], [1, 0, 0]], self.evaluate(indicator_tensor))
def test_transform_with_weighted_column(self):
# Github issue 12557
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
'weights': constant_op.constant([[2., 4., 6., 1.]])
}
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual([[6., 4., 3.]], self.evaluate(indicator_tensor))
def test_transform_with_missing_value_in_weighted_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
'weights': constant_op.constant([[2., 4., 6.]])
}
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual([[0., 4., 2.]], self.evaluate(indicator_tensor))
def test_transform_with_missing_value_in_categorical_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
indicator = fc.indicator_column(ids)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
}
indicator_tensor = fc._transform_features_v2(features, [indicator],
None)[indicator]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllEqual([[0., 1., 1.]], self.evaluate(indicator_tensor))
def test_old_linear_model(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = fc_old.linear_model(features, [animal])
weight_var = get_linear_model_column_var(animal)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
self.assertAllClose([[0.]], self.evaluate(predictions))
self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
self.assertAllClose([[2. + 3.]], self.evaluate(predictions))
def test_old_linear_model_old_categorical(self):
animal = fc.indicator_column(
fc_old._categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = fc_old.linear_model(features, [animal])
weight_var = get_linear_model_column_var(animal)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
self.assertAllClose([[0.]], self.evaluate(predictions))
self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
self.assertAllClose([[2. + 3.]], self.evaluate(predictions))
def test_input_layer(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc_old.input_layer(features, [animal])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
def test_input_layer_old_categorical(self):
animal = fc.indicator_column(
fc_old._categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc_old.input_layer(features, [animal])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
def test_serialization(self):
parent = fc.categorical_column_with_identity('animal', num_buckets=4)
animal = fc.indicator_column(parent)
self.assertEqual([parent], animal.parents)
config = animal.get_config()
self.assertEqual({
'categorical_column': {
'class_name': 'IdentityCategoricalColumn',
'config': {
'key': 'animal',
'default_value': None,
'number_buckets': 4
}
}
}, config)
new_animal = fc.IndicatorColumn.from_config(config)
self.assertEqual(animal, new_animal)
self.assertIsNot(parent, new_animal.categorical_column)
new_animal = fc.IndicatorColumn.from_config(
config,
columns_by_name={
serialization._column_name_with_class_name(parent): parent
})
self.assertEqual(animal, new_animal)
self.assertIs(parent, new_animal.categorical_column)
| IndicatorColumnTest |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 17785,
"end": 18507
} | class ____(Benchmark):
param_names = ['sparse_type', 'density']
params = [
['spmatrix', 'sparray'],
np.arange(0, 1.1, 0.1).tolist(),
]
def setup(self, sparse_type, density):
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
self.nrows = 1000
self.ncols = 1000
self.format = 'csr'
def time_rand(self, sparse_type, density):
if sparse_type == "sparray":
self.X = sparse.random_array(
(self.nrows, self.ncols), format=self.format, density=density
)
else:
self.X = sparse.random(
self.nrows, self.ncols, format=self.format, density=density
)
| Random |
python | doocs__leetcode | lcof/面试题66. 构建乘积数组/Solution.py | {
"start": 0,
"end": 327
} | class ____:
def constructArr(self, a: List[int]) -> List[int]:
n = len(a)
ans = [0] * n
left = right = 1
for i in range(n):
ans[i] = left
left *= a[i]
for i in range(n - 1, -1, -1):
ans[i] *= right
right *= a[i]
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 151384,
"end": 154825
} | class ____(Qwen2_5OmniPreTrainedModel):
config: Qwen2_5OmniBigVGANConfig
input_modalities = "audio"
def __init__(self, config: Qwen2_5OmniBigVGANConfig):
super().__init__(config)
self.num_residual_blocks = len(config.resblock_kernel_sizes)
self.num_upsample_layers = len(config.upsample_rates)
self.conv_pre = nn.Conv1d(config.mel_dim, config.upsample_initial_channel, 7, 1, padding=3)
# Removing extra ModuleList breaks official state dict
ups = [
nn.ModuleList(
[
nn.ConvTranspose1d(
config.upsample_initial_channel // (2**layer_idx),
config.upsample_initial_channel // (2 ** (layer_idx + 1)),
kernel_size,
stride,
padding=(kernel_size - stride) // 2,
)
]
)
for layer_idx, (stride, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes))
]
self.ups = nn.ModuleList(ups)
self.resblocks = nn.ModuleList(
[
AMPBlock(config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, dilation)
for layer_idx in range(self.num_upsample_layers)
for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes)
]
)
self.activation_post = TorchActivation1d(
activation=SnakeBeta(config.upsample_initial_channel // (2**self.num_upsample_layers))
)
self.conv_post = nn.Conv1d(
config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False
)
def normalize_spectrogram(self, spectrogram, max_value, min_db):
return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value)
def amplitude_to_db(self, amplitude, min_db_level):
min_level = torch.exp(
torch.tensor(min_db_level / 20.0 * np.log(10), device=amplitude.device, dtype=amplitude.dtype)
)
return 20 * torch.log10(torch.clamp(amplitude, min=min_level))
def process_mel_spectrogram(self, mel_spectrogram):
amplitude_spectrum = torch.exp(mel_spectrogram)
decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20
return self.normalize_spectrogram(decibel_spectrum, 1, -115)
def forward(self, mel_spectrogram):
processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram)
hidden_representation = self.conv_pre(processed_spectrogram)
for layer_index in range(self.num_upsample_layers):
hidden_representation = self.ups[layer_index][0](hidden_representation)
residual_output = sum(
self.resblocks[layer_index * self.num_residual_blocks + block_index](hidden_representation)
for block_index in range(self.num_residual_blocks)
)
residual_output = residual_output / self.num_residual_blocks
hidden_representation = residual_output
hidden_representation = self.activation_post(hidden_representation)
output_waveform = self.conv_post(hidden_representation)
return torch.clamp(output_waveform, min=-1.0, max=1.0).squeeze().cpu()
| Qwen2_5OmniToken2WavBigVGANModel |
python | doocs__leetcode | solution/2500-2599/2509.Cycle Length Queries in a Tree/Solution.py | {
"start": 0,
"end": 359
} | class ____:
def cycleLengthQueries(self, n: int, queries: List[List[int]]) -> List[int]:
ans = []
for a, b in queries:
t = 1
while a != b:
if a > b:
a >>= 1
else:
b >>= 1
t += 1
ans.append(t)
return ans
| Solution |
python | getsentry__sentry | src/sentry/models/rule.py | {
"start": 5611,
"end": 6025
} | class ____(Model):
__relocation_scope__ = RelocationScope.Organization
rule = FlexibleForeignKey("sentry.Rule")
user_id = HybridCloudForeignKey("sentry.User", on_delete="SET_NULL", null=True)
type = models.IntegerField()
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_ruleactivity"
@region_silo_model
| RuleActivity |
python | pandas-dev__pandas | pandas/tests/indexing/test_loc.py | {
"start": 62211,
"end": 71172
} | class ____:
@pytest.mark.parametrize(
"keys, expected",
[
(["b", "a"], [["b", "b", "a", "a"], [1, 2, 1, 2]]),
(["a", "b"], [["a", "a", "b", "b"], [1, 2, 1, 2]]),
((["a", "b"], [1, 2]), [["a", "a", "b", "b"], [1, 2, 1, 2]]),
((["a", "b"], [2, 1]), [["a", "a", "b", "b"], [2, 1, 2, 1]]),
((["b", "a"], [2, 1]), [["b", "b", "a", "a"], [2, 1, 2, 1]]),
((["b", "a"], [1, 2]), [["b", "b", "a", "a"], [1, 2, 1, 2]]),
((["c", "a"], [2, 1]), [["c", "a", "a"], [1, 2, 1]]),
],
)
@pytest.mark.parametrize("dim", ["index", "columns"])
def test_loc_getitem_multilevel_index_order(self, dim, keys, expected):
# GH#22797
# Try to respect order of keys given for MultiIndex.loc
kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]}
df = DataFrame(np.arange(25).reshape(5, 5), **kwargs)
exp_index = MultiIndex.from_arrays(expected)
if dim == "index":
res = df.loc[keys, :]
tm.assert_index_equal(res.index, exp_index)
elif dim == "columns":
res = df.loc[:, keys]
tm.assert_index_equal(res.columns, exp_index)
def test_loc_preserve_names(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.loc[2000]
result2 = ymd["A"].loc[2000]
assert result.index.names == ymd.index.names[1:]
assert result2.index.names == ymd.index.names[1:]
result = ymd.loc[2000, 2]
result2 = ymd["A"].loc[2000, 2]
assert result.index.name == ymd.index.names[2]
assert result2.index.name == ymd.index.names[2]
def test_loc_getitem_multiindex_nonunique_len_zero(self):
# GH#13691
mi = MultiIndex.from_product([[0], [1, 1]])
ser = Series(0, index=mi)
res = ser.loc[[]]
expected = ser[:0]
tm.assert_series_equal(res, expected)
res2 = ser.loc[ser.iloc[0:0]]
tm.assert_series_equal(res2, expected)
def test_loc_getitem_access_none_value_in_multiindex(self):
# GH#34318: test that you can access a None value using .loc
# through a Multiindex
ser = Series([None], MultiIndex.from_arrays([["Level1"], ["Level2"]]))
result = ser.loc[("Level1", "Level2")]
assert result is None
midx = MultiIndex.from_product([["Level1"], ["Level2_a", "Level2_b"]])
ser = Series([None] * len(midx), dtype=object, index=midx)
result = ser.loc[("Level1", "Level2_a")]
assert result is None
ser = Series([1] * len(midx), dtype=object, index=midx)
result = ser.loc[("Level1", "Level2_a")]
assert result == 1
def test_loc_setitem_multiindex_slice(self):
# GH 34870
index = MultiIndex.from_tuples(
zip(
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
),
names=["first", "second"],
)
result = Series([1, 1, 1, 1, 1, 1, 1, 1], index=index)
result.loc[("baz", "one") : ("foo", "two")] = 100
expected = Series([1, 1, 100, 100, 100, 100, 1, 1], index=index)
tm.assert_series_equal(result, expected)
def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self):
times = date_range("2000-01-01", freq="10min", periods=100000)
ser = Series(range(100000), times)
result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
tm.assert_series_equal(result, ser)
def test_loc_getitem_datetime_string_with_datetimeindex(self):
# GH 16710
df = DataFrame(
{"a": range(10), "b": range(10)},
index=date_range("2010-01-01", "2010-01-10", unit="ns"),
)
result = df.loc[["2010-01-01", "2010-01-05"], ["a", "b"]]
expected = DataFrame(
{"a": [0, 4], "b": [0, 4]},
index=DatetimeIndex(["2010-01-01", "2010-01-05"]).as_unit("ns"),
)
tm.assert_frame_equal(result, expected)
def test_loc_getitem_sorted_index_level_with_duplicates(self):
# GH#4516 sorting a MultiIndex with duplicates and multiple dtypes
mi = MultiIndex.from_tuples(
[
("foo", "bar"),
("foo", "bar"),
("bah", "bam"),
("bah", "bam"),
("foo", "bar"),
("bah", "bam"),
],
names=["A", "B"],
)
df = DataFrame(
[
[1.0, 1],
[2.0, 2],
[3.0, 3],
[4.0, 4],
[5.0, 5],
[6.0, 6],
],
index=mi,
columns=["C", "D"],
)
df = df.sort_index(level=0)
expected = DataFrame(
[[1.0, 1], [2.0, 2], [5.0, 5]], columns=["C", "D"], index=mi.take([0, 1, 4])
)
result = df.loc[("foo", "bar")]
tm.assert_frame_equal(result, expected)
def test_additional_element_to_categorical_series_loc(self):
# GH#47677
result = Series(["a", "b", "c"], dtype="category")
result.loc[3] = 0
expected = Series(["a", "b", "c", 0], dtype="object")
tm.assert_series_equal(result, expected)
def test_additional_categorical_element_loc(self):
# GH#47677
result = Series(["a", "b", "c"], dtype="category")
result.loc[3] = "a"
expected = Series(["a", "b", "c", "a"], dtype="category")
tm.assert_series_equal(result, expected)
def test_loc_set_nan_in_categorical_series(self, any_numeric_ea_dtype):
# GH#47677
srs = Series(
[1, 2, 3],
dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)),
)
# enlarge
srs.loc[3] = np.nan
expected = Series(
[1, 2, 3, np.nan],
dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)),
)
tm.assert_series_equal(srs, expected)
# set into
srs.loc[1] = np.nan
expected = Series(
[1, np.nan, 3, np.nan],
dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)),
)
tm.assert_series_equal(srs, expected)
@pytest.mark.parametrize("na", (np.nan, pd.NA, None, pd.NaT))
def test_loc_consistency_series_enlarge_set_into(self, na):
# GH#47677
srs_enlarge = Series(["a", "b", "c"], dtype="category")
srs_enlarge.loc[3] = na
srs_setinto = Series(["a", "b", "c", "a"], dtype="category")
srs_setinto.loc[3] = na
tm.assert_series_equal(srs_enlarge, srs_setinto)
expected = Series(["a", "b", "c", na], dtype="category")
tm.assert_series_equal(srs_enlarge, expected)
def test_loc_getitem_preserves_index_level_category_dtype(self):
# GH#15166
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
expected = CategoricalIndex(
["a", "b"],
categories=["a", "b"],
ordered=False,
name="Index1",
dtype="category",
)
result = df.index.levels[0]
tm.assert_index_equal(result, expected)
result = df.loc[["a"]].index.levels[0]
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("lt_value", [30, 10])
def test_loc_multiindex_levels_contain_values_not_in_index_anymore(self, lt_value):
# GH#41170
df = DataFrame({"a": [12, 23, 34, 45]}, index=[list("aabb"), [0, 1, 2, 3]])
with pytest.raises(KeyError, match=r"\['b'\] not in index"):
df.loc[df["a"] < lt_value, :].loc[["b"], :]
def test_loc_multiindex_null_slice_na_level(self):
# GH#42055
lev1 = np.array([np.nan, np.nan])
lev2 = ["bar", "baz"]
mi = MultiIndex.from_arrays([lev1, lev2])
ser = Series([0, 1], index=mi)
result = ser.loc[:, "bar"]
# TODO: should we have name="bar"?
expected = Series([0], index=[np.nan])
tm.assert_series_equal(result, expected)
def test_loc_drops_level(self):
# Based on test_series_varied_multiindex_alignment, where
# this used to fail to drop the first level
mi = MultiIndex.from_product(
[list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"]
)
ser = Series(range(8), index=mi)
loc_result = ser.loc["a", :, :]
expected = ser.index.droplevel(0)[:4]
tm.assert_index_equal(loc_result.index, expected)
| TestLocWithMultiIndex |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_X.py | {
"start": 4204,
"end": 5598
} | class ____(Benchmark):
r"""
Xin-She Yang 4 objective function.
This class defines the Xin-She Yang 4 [1]_ global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{XinSheYang04}}(x) = \left[ \sum_{i=1}^{n} \sin^2(x_i)
- e^{-\sum_{i=1}^{n} x_i^2} \right ]
e^{-\sum_{i=1}^{n} \sin^2 \sqrt{ \lvert
x_i \rvert }}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
u = sum(sin(x) ** 2)
v = sum(x ** 2)
w = sum(sin(sqrt(abs(x))) ** 2)
return (u - exp(-v)) * exp(-w)
| XinSheYang04 |
python | zarr-developers__zarr-python | src/zarr/experimental/cache_store.py | {
"start": 376,
"end": 14127
} | class ____(WrapperStore[Store]):
"""
A dual-store caching implementation for Zarr stores.
This cache wraps any Store implementation and uses a separate Store instance
as the cache backend. This provides persistent caching capabilities with
time-based expiration, size-based eviction, and flexible cache storage options.
Parameters
----------
store : Store
The underlying store to wrap with caching
cache_store : Store
The store to use for caching (can be any Store implementation)
max_age_seconds : int | None, optional
Maximum age of cached entries in seconds. None means no expiration.
Default is None.
max_size : int | None, optional
Maximum size of the cache in bytes. When exceeded, least recently used
items are evicted. None means unlimited size. Default is None.
Note: Individual values larger than max_size will not be cached.
key_insert_times : dict[str, float] | None, optional
Dictionary to track insertion times (using monotonic time).
Primarily for internal use. Default is None (creates new dict).
cache_set_data : bool, optional
Whether to cache data when it's written to the store. Default is True.
Examples
--------
```python
import zarr
from zarr.storage import MemoryStore
from zarr.experimental.cache_store import CacheStore
# Create a cached store
source_store = MemoryStore()
cache_store = MemoryStore()
cached_store = CacheStore(
store=source_store,
cache_store=cache_store,
max_age_seconds=60,
max_size=1024*1024
)
# Use it like any other store
array = zarr.create(shape=(100,), store=cached_store)
array[:] = 42
```
"""
_cache: Store
max_age_seconds: int | Literal["infinity"]
max_size: int | None
key_insert_times: dict[str, float]
cache_set_data: bool
_cache_order: OrderedDict[str, None] # Track access order for LRU
_current_size: int # Track current cache size
_key_sizes: dict[str, int] # Track size of each cached key
_lock: asyncio.Lock
_hits: int # Cache hit counter
_misses: int # Cache miss counter
_evictions: int # Cache eviction counter
def __init__(
self,
store: Store,
*,
cache_store: Store,
max_age_seconds: int | str = "infinity",
max_size: int | None = None,
key_insert_times: dict[str, float] | None = None,
cache_set_data: bool = True,
) -> None:
super().__init__(store)
if not cache_store.supports_deletes:
msg = (
f"The provided cache store {cache_store} does not support deletes. "
"The cache_store must support deletes for CacheStore to function properly."
)
raise ValueError(msg)
self._cache = cache_store
# Validate and set max_age_seconds
if isinstance(max_age_seconds, str):
if max_age_seconds != "infinity":
raise ValueError("max_age_seconds string value must be 'infinity'")
self.max_age_seconds = "infinity"
else:
self.max_age_seconds = max_age_seconds
self.max_size = max_size
if key_insert_times is None:
self.key_insert_times = {}
else:
self.key_insert_times = key_insert_times
self.cache_set_data = cache_set_data
self._cache_order = OrderedDict()
self._current_size = 0
self._key_sizes = {}
self._lock = asyncio.Lock()
self._hits = 0
self._misses = 0
self._evictions = 0
def _is_key_fresh(self, key: str) -> bool:
"""Check if a cached key is still fresh based on max_age_seconds.
Uses monotonic time for accurate elapsed time measurement.
"""
if self.max_age_seconds == "infinity":
return True
now = time.monotonic()
elapsed = now - self.key_insert_times.get(key, 0)
return elapsed < self.max_age_seconds
async def _accommodate_value(self, value_size: int) -> None:
"""Ensure there is enough space in the cache for a new value.
Must be called while holding self._lock.
"""
if self.max_size is None:
return
# Remove least recently used items until we have enough space
while self._current_size + value_size > self.max_size and self._cache_order:
# Get the least recently used key (first in OrderedDict)
lru_key = next(iter(self._cache_order))
await self._evict_key(lru_key)
async def _evict_key(self, key: str) -> None:
"""Evict a key from the cache.
Must be called while holding self._lock.
Updates size tracking atomically with deletion.
"""
try:
key_size = self._key_sizes.get(key, 0)
# Delete from cache store
await self._cache.delete(key)
# Update tracking after successful deletion
self._remove_from_tracking(key)
self._current_size = max(0, self._current_size - key_size)
self._evictions += 1
logger.debug("_evict_key: evicted key %s, freed %d bytes", key, key_size)
except Exception:
logger.exception("_evict_key: failed to evict key %s", key)
raise # Re-raise to signal eviction failure
async def _cache_value(self, key: str, value: Buffer) -> None:
"""Cache a value with size tracking.
This method holds the lock for the entire operation to ensure atomicity.
"""
value_size = len(value)
# Check if value exceeds max size
if self.max_size is not None and value_size > self.max_size:
logger.warning(
"_cache_value: value size %d exceeds max_size %d, skipping cache",
value_size,
self.max_size,
)
return
async with self._lock:
# If key already exists, subtract old size first
if key in self._key_sizes:
old_size = self._key_sizes[key]
self._current_size -= old_size
logger.debug("_cache_value: updating existing key %s, old size %d", key, old_size)
# Make room for the new value (this calls _evict_key_locked internally)
await self._accommodate_value(value_size)
# Update tracking atomically
self._cache_order[key] = None # OrderedDict to track access order
self._current_size += value_size
self._key_sizes[key] = value_size
self.key_insert_times[key] = time.monotonic()
logger.debug("_cache_value: cached key %s with size %d bytes", key, value_size)
async def _update_access_order(self, key: str) -> None:
"""Update the access order for LRU tracking."""
if key in self._cache_order:
async with self._lock:
# Move to end (most recently used)
self._cache_order.move_to_end(key)
def _remove_from_tracking(self, key: str) -> None:
"""Remove a key from all tracking structures.
Must be called while holding self._lock.
"""
self._cache_order.pop(key, None)
self.key_insert_times.pop(key, None)
self._key_sizes.pop(key, None)
async def _get_try_cache(
self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None
) -> Buffer | None:
"""Try to get data from cache first, falling back to source store."""
maybe_cached_result = await self._cache.get(key, prototype, byte_range)
if maybe_cached_result is not None:
logger.debug("_get_try_cache: key %s found in cache (HIT)", key)
self._hits += 1
# Update access order for LRU
await self._update_access_order(key)
return maybe_cached_result
else:
logger.debug(
"_get_try_cache: key %s not found in cache (MISS), fetching from store", key
)
self._misses += 1
maybe_fresh_result = await super().get(key, prototype, byte_range)
if maybe_fresh_result is None:
# Key doesn't exist in source store
await self._cache.delete(key)
async with self._lock:
self._remove_from_tracking(key)
else:
# Cache the newly fetched value
await self._cache.set(key, maybe_fresh_result)
await self._cache_value(key, maybe_fresh_result)
return maybe_fresh_result
async def _get_no_cache(
self, key: str, prototype: BufferPrototype, byte_range: ByteRequest | None = None
) -> Buffer | None:
"""Get data directly from source store and update cache."""
self._misses += 1
maybe_fresh_result = await super().get(key, prototype, byte_range)
if maybe_fresh_result is None:
# Key doesn't exist in source, remove from cache and tracking
await self._cache.delete(key)
async with self._lock:
self._remove_from_tracking(key)
else:
logger.debug("_get_no_cache: key %s found in store, setting in cache", key)
await self._cache.set(key, maybe_fresh_result)
await self._cache_value(key, maybe_fresh_result)
return maybe_fresh_result
async def get(
self,
key: str,
prototype: BufferPrototype,
byte_range: ByteRequest | None = None,
) -> Buffer | None:
"""
Retrieve data from the store, using cache when appropriate.
Parameters
----------
key : str
The key to retrieve
prototype : BufferPrototype
Buffer prototype for creating the result buffer
byte_range : ByteRequest, optional
Byte range to retrieve
Returns
-------
Buffer | None
The retrieved data, or None if not found
"""
if not self._is_key_fresh(key):
logger.debug("get: key %s is not fresh, fetching from store", key)
return await self._get_no_cache(key, prototype, byte_range)
else:
logger.debug("get: key %s is fresh, trying cache", key)
return await self._get_try_cache(key, prototype, byte_range)
async def set(self, key: str, value: Buffer) -> None:
"""
Store data in the underlying store and optionally in cache.
Parameters
----------
key : str
The key to store under
value : Buffer
The data to store
"""
logger.debug("set: setting key %s in store", key)
await super().set(key, value)
if self.cache_set_data:
logger.debug("set: setting key %s in cache", key)
await self._cache.set(key, value)
await self._cache_value(key, value)
else:
logger.debug("set: deleting key %s from cache", key)
await self._cache.delete(key)
async with self._lock:
self._remove_from_tracking(key)
async def delete(self, key: str) -> None:
"""
Delete data from both the underlying store and cache.
Parameters
----------
key : str
The key to delete
"""
logger.debug("delete: deleting key %s from store", key)
await super().delete(key)
logger.debug("delete: deleting key %s from cache", key)
await self._cache.delete(key)
async with self._lock:
self._remove_from_tracking(key)
def cache_info(self) -> dict[str, Any]:
"""Return information about the cache state."""
return {
"cache_store_type": type(self._cache).__name__,
"max_age_seconds": "infinity"
if self.max_age_seconds == "infinity"
else self.max_age_seconds,
"max_size": self.max_size,
"current_size": self._current_size,
"cache_set_data": self.cache_set_data,
"tracked_keys": len(self.key_insert_times),
"cached_keys": len(self._cache_order),
}
def cache_stats(self) -> dict[str, Any]:
"""Return cache performance statistics."""
total_requests = self._hits + self._misses
hit_rate = self._hits / total_requests if total_requests > 0 else 0.0
return {
"hits": self._hits,
"misses": self._misses,
"evictions": self._evictions,
"total_requests": total_requests,
"hit_rate": hit_rate,
}
async def clear_cache(self) -> None:
"""Clear all cached data and tracking information."""
# Clear the cache store if it supports clear
if hasattr(self._cache, "clear"):
await self._cache.clear()
# Reset tracking
async with self._lock:
self.key_insert_times.clear()
self._cache_order.clear()
self._key_sizes.clear()
self._current_size = 0
logger.debug("clear_cache: cleared all cache data")
def __repr__(self) -> str:
"""Return string representation of the cache store."""
return (
f"{self.__class__.__name__}("
f"store={self._store!r}, "
f"cache_store={self._cache!r}, "
f"max_age_seconds={self.max_age_seconds}, "
f"max_size={self.max_size}, "
f"current_size={self._current_size}, "
f"cached_keys={len(self._cache_order)})"
)
| CacheStore |
python | explosion__spaCy | spacy/pipeline/functions.py | {
"start": 2307,
"end": 4374
} | class ____:
def __init__(self, min_length: int = 0, split_length: int = 0):
self.min_length = min_length
self.split_length = split_length
def __call__(self, doc: Doc) -> Doc:
if self.min_length > 0 and self.split_length > 0:
with doc.retokenize() as retokenizer:
for t in doc:
if len(t.text) >= self.min_length:
orths = []
heads = []
attrs = {} # type: ignore[var-annotated]
for i in range(0, len(t.text), self.split_length):
orths.append(t.text[i : i + self.split_length])
heads.append((t, i / self.split_length))
retokenizer.split(t, orths, heads, attrs) # type: ignore[arg-type]
return doc
def _get_config(self) -> Dict[str, Any]:
return {
"min_length": self.min_length,
"split_length": self.split_length,
}
def _set_config(self, config: Dict[str, Any] = {}) -> None:
self.min_length = config.get("min_length", 0)
self.split_length = config.get("split_length", 0)
def to_bytes(self, **kwargs):
serializers = {
"cfg": lambda: srsly.json_dumps(self._get_config()),
}
return util.to_bytes(serializers, [])
def from_bytes(self, data, **kwargs):
deserializers = {
"cfg": lambda b: self._set_config(srsly.json_loads(b)),
}
util.from_bytes(data, deserializers, [])
return self
def to_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: srsly.write_json(p, self._get_config()),
}
return util.to_disk(path, serializers, [])
def from_disk(self, path, **kwargs):
path = util.ensure_path(path)
serializers = {
"cfg": lambda p: self._set_config(srsly.read_json(p)),
}
util.from_disk(path, serializers, [])
| TokenSplitter |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_organization_seer_explorer_update.py | {
"start": 3909,
"end": 4871
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.organization = self.create_organization(owner=self.user)
self.url = f"/api/0/organizations/{self.organization.slug}/seer/explorer-update/123/"
@patch("sentry.seer.endpoints.organization_seer_explorer_update.get_seer_org_acknowledgement")
def test_explorer_update_feature_flag_disabled(
self, mock_get_seer_org_acknowledgement: MagicMock
) -> None:
mock_get_seer_org_acknowledgement.return_value = True
response = self.client.post(
self.url,
data={
"payload": {
"type": "interrupt",
},
},
format="json",
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "Feature flag not enabled" in str(response.data)
| TestOrganizationSeerExplorerUpdateFeatureFlags |
python | ray-project__ray | python/ray/llm/_internal/batch/observability/usage_telemetry/usage.py | {
"start": 832,
"end": 1605
} | class ____(str, Enum):
"""Telemetry tags for RayLLM Batch."""
LLM_BATCH_PROCESSOR_CONFIG_NAME = "LLM_BATCH_PROCESSOR_CONFIG_NAME"
LLM_BATCH_MODEL_ARCHITECTURE = "LLM_BATCH_MODEL_ARCHITECTURE"
LLM_BATCH_SIZE = "LLM_BATCH_SIZE"
LLM_BATCH_ACCELERATOR_TYPE = "LLM_BATCH_ACCELERATOR_TYPE"
LLM_BATCH_CONCURRENCY = "LLM_BATCH_CONCURRENCY"
LLM_BATCH_TASK_TYPE = "LLM_BATCH_TASK_TYPE"
LLM_BATCH_PIPELINE_PARALLEL_SIZE = "LLM_BATCH_PIPELINE_PARALLEL_SIZE"
LLM_BATCH_TENSOR_PARALLEL_SIZE = "LLM_BATCH_TENSOR_PARALLEL_SIZE"
LLM_BATCH_DATA_PARALLEL_SIZE = "LLM_BATCH_DATA_PARALLEL_SIZE"
@ray.remote(
name=LLM_BATCH_TELEMETRY_ACTOR_NAME,
namespace=LLM_BATCH_TELEMETRY_NAMESPACE,
num_cpus=0,
lifetime="detached",
)
| BatchTelemetryTags |
python | plotly__plotly.py | plotly/graph_objs/scattersmith/_line.py | {
"start": 233,
"end": 8428
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattersmith"
_path_str = "scattersmith.line"
_valid_props = {
"backoff",
"backoffsrc",
"color",
"dash",
"shape",
"smoothing",
"width",
}
@property
def backoff(self):
"""
Sets the line back off from the end point of the nth line
segment (in px). This option is useful e.g. to avoid overlap
with arrowhead markers. With "auto" the lines would trim before
markers if `marker.angleref` is set to "previous".
The 'backoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["backoff"]
@backoff.setter
def backoff(self, val):
self["backoff"] = val
@property
def backoffsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `backoff`.
The 'backoffsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["backoffsrc"]
@backoffsrc.setter
def backoffsrc(self, val):
self["backoffsrc"] = val
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def shape(self):
"""
Determines the line shape. With "spline" the lines are drawn
using spline interpolation. The other available values
correspond to step-wise line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'spline']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
@property
def smoothing(self):
"""
Has an effect only if `shape` is set to "spline" Sets the
amount of smoothing. 0 corresponds to no smoothing (equivalent
to a "linear" shape).
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
backoff
Sets the line back off from the end point of the nth
line segment (in px). This option is useful e.g. to
avoid overlap with arrowhead markers. With "auto" the
lines would trim before markers if `marker.angleref` is
set to "previous".
backoffsrc
Sets the source reference on Chart Studio Cloud for
`backoff`.
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
backoff=None,
backoffsrc=None,
color=None,
dash=None,
shape=None,
smoothing=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.Line`
backoff
Sets the line back off from the end point of the nth
line segment (in px). This option is useful e.g. to
avoid overlap with arrowhead markers. With "auto" the
lines would trim before markers if `marker.angleref` is
set to "previous".
backoffsrc
Sets the source reference on Chart Studio Cloud for
`backoff`.
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattersmith.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("backoff", arg, backoff)
self._set_property("backoffsrc", arg, backoffsrc)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("shape", arg, shape)
self._set_property("smoothing", arg, smoothing)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | django__django | django/db/models/lookups.py | {
"start": 24140,
"end": 24723
} | class ____(BuiltinLookup):
lookup_name = "regex"
prepare_rhs = False
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super().as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = (*lhs_params, *rhs_params)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), params
@Field.register_lookup
| Regex |
python | bokeh__bokeh | src/bokeh/core/property/wrappers.py | {
"start": 5586,
"end": 7856
} | class ____(PropertyValueContainer, list[T]):
""" A list property value container that supports change notifications on
mutating operations.
When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are
transparently created to wrap those values. These ``PropertyValueList``
values are subject to normal property validation. If the property type
``foo = List(Str)`` then attempting to set ``x.foo[0] = 10`` will raise
an error.
Instances of ``PropertyValueList`` can be explicitly created by passing
any object that the standard list initializer accepts, for example:
.. code-block:: python
>>> PropertyValueList([10, 20])
[10, 20]
>>> PropertyValueList((10, 20))
[10, 20]
The following mutating operations on lists automatically trigger
notifications:
.. code-block:: python
del x[y]
del x[i:j]
x += y
x *= y
x[i] = y
x[i:j] = y
x.append
x.extend
x.insert
x.pop
x.remove
x.reverse
x.sort
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def _saved_copy(self) -> list[T]:
return list(self)
# delete x[y]
@notify_owner
def __delitem__(self, y):
return super().__delitem__(y)
# x += y
@notify_owner
def __iadd__(self, y):
return super().__iadd__(y)
# x *= y
@notify_owner
def __imul__(self, y):
return super().__imul__(y)
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super().__setitem__(i, y)
@notify_owner
def append(self, obj):
return super().append(obj)
@notify_owner
def extend(self, iterable):
return super().extend(iterable)
@notify_owner
def insert(self, index, obj):
return super().insert(index, obj)
@notify_owner
def pop(self, index=-1):
return super().pop(index)
@notify_owner
def remove(self, obj):
return super().remove(obj)
@notify_owner
def reverse(self):
return super().reverse()
@notify_owner
def sort(self, **kwargs):
return super().sort(**kwargs)
| PropertyValueList |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow_start_noblock.py | {
"start": 187,
"end": 946
} | class ____(BaseExecutor):
def post_init(self):
time.sleep(4)
@pytest.mark.slow
def test_flow_slow_executor_intra():
f = Flow().add(uses='SlowExecutor', shards=2)
with f, TimeContext('start flow') as tc:
assert tc.now() < 8
@pytest.mark.slow
def test_flow_slow_executor_inter():
f = Flow().add(uses='SlowExecutor', shards=3).add(uses='SlowExecutor', shards=3)
with f, TimeContext('start flow') as tc:
assert tc.now() < 8
@pytest.mark.slow
def test_flow_slow_executor_bad_fail_early():
f = Flow().add(uses='SlowExecutor', shards=3).add(uses='BADNAME_EXECUTOR', shards=3)
with pytest.raises(RuntimeFailToStart):
with f, TimeContext('start flow') as tc:
assert tc.now() < 8
| SlowExecutor |
python | PyCQA__pycodestyle | testing/data/E30not.py | {
"start": 371,
"end": 979
} | class ____:
def a():
pass
# comment
def b():
pass
@property
def c():
pass
try:
from nonexistent import Bar
except ImportError:
class Bar(object):
"""This is a Bar replacement"""
def with_feature(f):
"""Some decorator"""
wrapper = f
if has_this_feature(f):
def wrapper(*args):
call_feature(args[0])
return f(*args)
return wrapper
try:
next
except NameError:
def next(iterator, default):
for item in iterator:
return item
return default
def a():
pass
| Y |
python | pytorch__pytorch | .github/scripts/test_trymerge.py | {
"start": 8441,
"end": 23111
} | class ____(TestCase):
def test_merge_rules_valid(self, *args: Any) -> None:
"Test that merge_rules.yaml can be parsed"
repo = DummyGitRepo()
merge_rules = read_merge_rules(repo, "pytorch", "pytorch")
self.assertGreater(len(merge_rules), 1)
@mock.patch("trymerge.read_merge_rules", side_effect=mocked_read_merge_rules)
def test_match_rules(self, *args: Any) -> None:
"Tests that PR passes merge rules"
pr = GitHubPR("pytorch", "pytorch", 109999)
repo = DummyGitRepo()
self.assertTrue(find_matching_merge_rule(pr, repo) is not None)
@mock.patch("trymerge.read_merge_rules", side_effect=mocked_read_merge_rules_raise)
def test_read_merge_rules_fails(self, *args: Any) -> None:
"Tests that PR fails to read the merge rules"
pr = GitHubPR("pytorch", "pytorch", 77700)
repo = DummyGitRepo()
self.assertRaisesRegex(
RuntimeError, "testing", lambda: find_matching_merge_rule(pr, repo)
)
@mock.patch(
"trymerge.read_merge_rules", side_effect=mocked_read_merge_rules_approvers
)
def test_match_rules_approvers(self, *args: Any) -> None:
"Tests that PR has the necessary approvers"
repo = DummyGitRepo()
pr = GitHubPR("pytorch", "pytorch", 115329)
# Test that all potential approvers across all rules are listed if the
# PR doesn't have one of them
for mock_rule in ["Core Reviewers", "Core Maintainers"]:
self.assertRaisesRegex(
RuntimeError,
mock_rule,
lambda: find_matching_merge_rule(pr, repo),
)
pr = GitHubPR("pytorch", "pytorch", 115495)
# Test that PR with the correct approvers doesn't raise any exception
self.assertTrue(find_matching_merge_rule(pr, repo) is not None)
@mock.patch("trymerge.read_merge_rules", side_effect=mocked_read_merge_rules)
def test_lint_fails(self, *args: Any) -> None:
"Tests that PR fails mandatory lint check"
pr = GitHubPR("pytorch", "pytorch", 90791)
repo = DummyGitRepo()
self.assertRaises(RuntimeError, lambda: find_matching_merge_rule(pr, repo))
def test_get_last_comment(self, *args: Any) -> None:
"Tests that last comment can be fetched"
pr = GitHubPR("pytorch", "pytorch", 71759)
comment = pr.get_last_comment()
self.assertEqual(comment.author_login, "github-actions")
self.assertIsNone(comment.editor_login)
self.assertTrue("You've committed this PR" in comment.body_text)
def test_get_author_null(self, *args: Any) -> None:
"""Tests that PR author can be computed
If reply contains NULL
"""
pr = GitHubPR("pytorch", "pytorch", 71759)
author = pr.get_author()
self.assertTrue(author is not None)
self.assertTrue("@" in author)
self.assertTrue(pr.get_diff_revision() is None)
# PR with multiple contributors, but creator id is not among authors
pr = GitHubPR("pytorch", "pytorch", 75095)
self.assertEqual(pr.get_pr_creator_login(), "mruberry")
author = pr.get_author()
self.assertTrue(author is not None)
def test_large_diff(self, *args: Any) -> None:
"Tests that PR with 100+ files can be fetched"
pr = GitHubPR("pytorch", "pytorch", 73099)
self.assertTrue(pr.get_changed_files_count() > 100)
flist = pr.get_changed_files()
self.assertEqual(len(flist), pr.get_changed_files_count())
def test_internal_changes(self, *args: Any) -> None:
"Tests that PR with internal changes is detected"
pr = GitHubPR("pytorch", "pytorch", 110140)
self.assertTrue(pr.has_internal_changes())
def test_comments_pagination(self, *args: Any) -> None:
"Tests that PR with 50+ comments can be fetched"
pr = GitHubPR("pytorch", "pytorch", 31093)
self.assertGreater(len(pr.get_comments()), 50)
def test_gql_complexity(self, *args: Any) -> None:
"Fetch comments and conclusions for PR with 60 commits"
# Previous version of GrapQL query used to cause HTTP/502 error
# see https://gist.github.com/malfet/9b93bc7eeddeaf1d84546efc4f0c577f
pr = GitHubPR("pytorch", "pytorch", 68111)
self.assertGreater(len(pr.get_comments()), 20)
# NS(09/27/2023): GitHub seems to recycle older checkruns
# https://github.com/pytorch/pytorch/pull/68111/checks shows 0 runs
# self.assertGreater(len(pr.get_checkrun_conclusions()), 3)
self.assertGreater(pr.get_commit_count(), 60)
@skip("GitHub doesn't keep this data anymore")
def test_gql_retrieve_checksuites(self, *args: Any) -> None:
"Fetch comments and conclusions for PR with 60 commits"
pr = GitHubPR("pytorch", "pytorch", 94787)
self.assertEqual(len(pr.get_checkrun_conclusions()), 182)
def test_team_members(self, *args: Any) -> None:
"Test fetching team members works"
dev_infra_team = gh_get_team_members("pytorch", "pytorch-dev-infra")
self.assertGreater(len(dev_infra_team), 2)
with self.assertWarns(Warning):
non_existing_team = gh_get_team_members("pytorch", "qwertyuiop")
self.assertEqual(len(non_existing_team), 0)
def test_get_author_many_commits(self, *args: Any) -> None:
"""Tests that authors for all commits can be fetched"""
pr = GitHubPR("pytorch", "pytorch", 76118)
authors = pr.get_authors()
self.assertGreater(pr.get_commit_count(), 100)
self.assertGreater(len(authors), 50)
self.assertTrue("@" in pr.get_author())
@mock.patch("trymerge.read_merge_rules", side_effect=mocked_read_merge_rules_NE)
def test_pending_status_check(self, *args: Any) -> None:
"""Tests that PR with nonexistent/pending status checks fails with the right reason."""
pr = GitHubPR("pytorch", "pytorch", 76118)
repo = DummyGitRepo()
self.assertRaisesRegex(
MandatoryChecksMissingError,
".*are pending/not yet run.*",
lambda: find_matching_merge_rule(pr, repo),
)
def test_get_author_many_reviews(self, *args: Any) -> None:
"""Tests that all reviews can be fetched"""
pr = GitHubPR("pytorch", "pytorch", 76123)
approved_by = pr.get_approved_by()
self.assertGreater(len(approved_by), 0)
assert pr._reviews is not None # to pacify mypy
self.assertGreater(len(pr._reviews), 100)
def get_co_authors(self, *args: Any) -> None:
"""Tests that co-authors are recognized"""
pr = GitHubPR("pytorch", "pytorch", 118347)
authors = pr.get_authors()
self.assertIn("kit1980", authors)
self.assertIn("Co-authored-by:", pr.gen_commit_message())
def test_get_checkruns_many_runs(self, *args: Any) -> None:
"""Tests that all checkruns can be fetched"""
pr = GitHubPR("pytorch", "pytorch", 105260)
conclusions = pr.get_checkrun_conclusions()
self.assertEqual(len(conclusions), 221)
self.assertTrue("pull / linux-docs / build-docs-cpp-false" in conclusions)
def test_cancelled_gets_ignored(self, *args: Any) -> None:
"""Tests that cancelled workflow does not override existing successful status"""
pr = GitHubPR("pytorch", "pytorch", 110367)
conclusions = pr.get_checkrun_conclusions()
lint_checks = [name for name in conclusions if "Lint" in name]
self.assertTrue(len(lint_checks) > 0)
self.assertTrue(
all(conclusions[name].status == "SUCCESS" for name in lint_checks)
)
def test_get_review_comment_by_id(self, *args: Any) -> None:
"""Tests that even if the comment requested was actually a review instead of a simple comment, we can still find it"""
pr = GitHubPR("pytorch", "pytorch", 107070)
review_comment_id = 1582767635
comment = pr.get_comment_by_id(review_comment_id)
self.assertIsNotNone(comment)
@mock.patch("trymerge.gh_get_pr_info", return_value=mock_gh_get_info())
@mock.patch("trymerge.parse_args", return_value=mock_parse_args(True, False))
@mock.patch("trymerge.try_revert", side_effect=mock_revert)
def test_main_revert(self, mock_revert: Any, *args: Any) -> None:
trymerge_main()
mock_revert.assert_called_once()
@mock.patch("trymerge.gh_get_pr_info", return_value=mock_gh_get_info())
@mock.patch("trymerge.parse_args", return_value=mock_parse_args(False, True))
@mock.patch("trymerge.gh_remove_label", side_effect=mock_remove_label)
@mock.patch("trymerge.merge", side_effect=mock_merge)
def test_main_force(
self, mock_merge: Any, mock_parse_args: Any, *args: Any
) -> None:
trymerge_main()
mock_merge.assert_called_once_with(
mock.ANY,
mock.ANY,
comment_id=mock.ANY,
dry_run=mock.ANY,
skip_mandatory_checks=True,
ignore_current=False,
)
@mock.patch("trymerge.gh_get_pr_info", return_value=mock_gh_get_info())
@mock.patch("trymerge.parse_args", return_value=mock_parse_args(False, False))
@mock.patch("trymerge.gh_remove_label", side_effect=mock_remove_label)
@mock.patch("trymerge.merge", side_effect=mock_merge)
def test_main_merge(self, mock_merge: Any, *args: Any) -> None:
trymerge_main()
mock_merge.assert_called_once_with(
mock.ANY,
mock.ANY,
comment_id=mock.ANY,
dry_run=mock.ANY,
skip_mandatory_checks=False,
ignore_current=False,
)
@mock.patch("trymerge.read_merge_rules", side_effect=mocked_read_merge_rules)
def test_revert_rules(self, *args: Any) -> None:
"""Tests that reverts from collaborators are allowed"""
pr = GitHubPR("pytorch", "pytorch", 79694)
repo = DummyGitRepo()
self.assertIsNotNone(validate_revert(repo, pr, comment_id=1189459845))
def test_get_changed_files(self, *args: Any) -> None:
"""
Tests that the list changed files in a PR doesn't include duplicates
"""
pr = GitHubPR("pytorch", "pytorch", 95233)
try:
changed_files = pr.get_changed_files()
except RuntimeError as error:
self.fail(f"get_changed_files throws an exception: {error}")
self.assertEqual(len(changed_files), pr.get_changed_files_count())
def test_revert_codev_abandoned_diff_succeeds(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 100652)
class GitRepoCoDev(DummyGitRepo):
def commit_message(self, ref: str) -> str:
return pr.get_body()
repo = GitRepoCoDev()
validate_revert(repo, pr, comment_id=1588195237)
def test_pr_changed_submodule_detection(self, *args: Any) -> None:
# Updates submodule during dev-cycle but reverts it later
pr = GitHubPR("pytorch", "pytorch", 95045)
self.assertEqual(pr.get_changed_submodules(), [])
self.assertFalse(pr.has_invalid_submodule_updates())
# PR updates ideep
pr = GitHubPR("pytorch", "pytorch", 94939)
self.assertEqual(pr.get_changed_submodules(), ["third_party/ideep"])
self.assertTrue(pr.has_invalid_submodule_updates())
# Automated submodule update
pr = GitHubPR("pytorch", "pytorch", 91051)
self.assertEqual(pr.get_changed_submodules(), ["third_party/kineto"])
self.assertFalse(pr.has_invalid_submodule_updates())
def test_remove_job_name_suffix(self, *args: Any) -> None:
test_cases = [
{
"name": "linux-bionic-cuda12.6-py3.10-gcc9-sm86 / test (default, 1, 5, linux.g5.4xlarge.nvidia.gpu)",
"expected": "linux-bionic-cuda12.6-py3.10-gcc9-sm86 / test (default)",
},
{
"name": "android-emulator-build-test / build-and-test (default, 1, 1, ubuntu-20.04-16x)",
"expected": "android-emulator-build-test / build-and-test (default)",
},
{
"name": "linux-focal-rocm5.4.2-py3.8 / build",
"expected": "linux-focal-rocm5.4.2-py3.8 / build",
},
{
"name": "libtorch-cpu-shared-with-deps-release-build",
"expected": "libtorch-cpu-shared-with-deps-release-build",
},
{
"name": "manywheel-py3_8-cuda11_8-test / test",
"expected": "manywheel-py3_8-cuda11_8-test / test",
},
{
"name": "lintrunner / linux-job",
"expected": "lintrunner / linux-job",
},
{
"name": "Test `run_test.py` is usable without boto3",
"expected": "Test `run_test.py` is usable without boto3",
},
]
for case in test_cases:
self.assertEqual(case["expected"], remove_job_name_suffix(case["name"]))
def test_get_merge_base(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 104121)
mock_merge_base = "mocked-sha"
with mock.patch(
"trymerge.gh_fetch_merge_base", return_value=mock_merge_base
) as mocked_gh_fetch_merge_base:
self.assertEqual(mock_merge_base, pr.get_merge_base())
# Make sure that consecutive calls will use the same merge base instead of
# making another query
self.assertEqual(mock_merge_base, pr.get_merge_base())
mocked_gh_fetch_merge_base.assert_called_once()
def test_app_can_revert(self, *args: Any) -> None:
pr = GitHubPR("pytorch", "pytorch", 164660)
repo = DummyGitRepo()
app_comment_id, impostor_comment_id = 3375785595, 3377647892
# Check that app can revert
self.assertIsNotNone(validate_revert(repo, pr, comment_id=app_comment_id))
# But impostor can not
self.assertRaises(
PostCommentError,
lambda: validate_revert(repo, pr, comment_id=impostor_comment_id),
)
# Despite it's name being the name of the bot
self.assertEqual(
pr.get_comment_by_id(impostor_comment_id).author_login,
"pytorch-auto-revert",
)
@mock.patch("trymerge.gh_graphql", side_effect=mocked_gh_graphql)
@mock.patch("trymerge.gh_fetch_merge_base", return_value="")
@mock.patch(
"trymerge.get_drci_classifications", side_effect=mocked_drci_classifications
)
| TestTryMerge |
python | astropy__astropy | astropy/io/fits/hdu/table.py | {
"start": 1317,
"end": 9130
} | class ____(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
# The following flag can be used by subclasses to determine whether to load
# variable length data from the heap automatically or whether the columns
# should contain the size and offset in the heap and let the subclass
# decide when to load the data from the heap. This can be used for example
# in CompImageHDU to only load data tiles that are needed.
_load_variable_length_data = True
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(
cls,
columns,
header=None,
nrows=0,
fill=False,
character_as_bytes=False,
**kwargs,
):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
`numpy.ndarray` or `numpy.recarray` object, return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs` -like
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
An optional `Header` object to instantiate the new HDU yet. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
If `True`, will fill all cells with zeros or blanks. If `False`,
copy the data from input, undefined cells will still be filled with
zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(
coldefs, nrows=nrows, fill=fill, character_as_bytes=character_as_bytes
)
hdu = cls(
data=data, header=header, character_as_bytes=character_as_bytes, **kwargs
)
coldefs._add_listener(hdu)
return hdu
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (
self._load_variable_length_data
and any(type(r) in (_FormatP, _FormatQ) for r in columns._recformats)
and self._data_size is not None
and self._data_size > self._theap
):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8, self._data_offset)
tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data = raw_data[:tbsize].view(dtype=columns.dtype, type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype, self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
# and no data has been assigned to the columns, which case just
# return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
data._load_variable_length_data = self._load_variable_length_data
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder(">")
# hack to enable pseudo-uint support
data._uint = self._uint
# pass datLoc, for P format
data._heapoffset = self._theap
data._heapsize = self._header["PCOUNT"]
data._tbsize = self._header["NAXIS1"] * self._header["NAXIS2"]
data._gap = self._theap - data._tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_load_data(self):
"""Load the data if asked to."""
if not self._data_loaded:
self.data # noqa: B018
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# recreate data from the columns
self.data = FITS_rec.from_columns(
self.columns,
nrows=self._nrows,
fill=False,
character_as_bytes=self._character_as_bytes,
)
| _TableLikeHDU |
python | tensorflow__tensorflow | tensorflow/python/eager/tensor_test.py | {
"start": 1848,
"end": 19741
} | class ____(test_util.TensorFlowTestCase):
def testScalarTensor(self):
t = _create_tensor(3, dtype=dtypes.int32)
self.assertAllEqual(t, _create_tensor(np.array(3)))
self.assertEqual(dtypes.int32, t.dtype)
self.assertEqual(0, t.shape.ndims)
self.assertAllEqual([], t.shape.as_list())
self.assertIn("tf.Tensor", str(t))
self.assertIn("tf.Tensor", repr(t))
def testBadConstructorArgs(self):
context.ensure_initialized()
ctx = context.context()
device = ctx.device_name
# Missing device.
with self.assertRaisesRegex(TypeError, r".*argument 'device' \(pos 2\).*"):
ops.EagerTensor(1)
# Bad dtype type.
with self.assertRaisesRegex(TypeError,
"Expecting a DataType value for dtype. Got"):
ops.EagerTensor(1, device=device, dtype="1")
# Following errors happen when trying to copy to GPU.
if not test_util.is_gpu_available():
self.skipTest("No GPUs found")
with ops.device("/device:GPU:0"):
# Bad device.
with self.assertRaisesRegex(TypeError, "Error parsing device argument"):
ops.EagerTensor(1.0, device=1)
def testNumpyValue(self):
values = np.array([3.0])
t = _create_tensor(values)
self.assertAllEqual(values, t)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testNumpyDtypeSurvivesThroughTensorConversion(self):
scalar_creators = [np.int32, np.int64, np.float32, np.float64]
conversion_functions = [ops.convert_to_tensor, constant_op.constant]
for scalar_creator in scalar_creators:
for conversion_function in conversion_functions:
np_val = scalar_creator(3)
tensor_val = conversion_function(np_val)
self.assertEqual(tensor_val.numpy().dtype, np_val.dtype)
self.assertEqual(tensor_val.numpy(), np_val)
def testNumpyValueWithCast(self):
values = np.array([3.0], dtype=np.float32)
t = _create_tensor(values, dtype=dtypes.float64)
self.assertAllEqual(values, t)
ctx = context.context()
# Bad dtype value.
with self.assertRaisesRegex(TypeError, "Invalid dtype argument value"):
# The max value of TF_DataType is 33, so using 34 for the dtype fails.
ops.EagerTensor(values, device=ctx.device_name, dtype=34)
def testNumpyOrderHandling(self):
n = np.array([[1, 2], [3, 4]], order="F")
t = _create_tensor(n)
self.assertAllEqual([[1, 2], [3, 4]], t)
def testNumpyArrayDtype(self):
tensor = constant_op.constant([1.0, 2.0, 3.0])
numpy_tensor = np.asarray(tensor, dtype=np.int32)
self.assertAllEqual(numpy_tensor, [1, 2, 3])
def testNdimsAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(numpy_tensor.ndim, tensor.ndim)
def testLenAgreesWithNumpy(self):
numpy_tensor = np.asarray(1.0)
tensor = constant_op.constant(numpy_tensor)
with self.assertRaises(TypeError):
len(numpy_tensor)
with self.assertRaisesRegex(TypeError, r"Scalar tensor has no `len[(][)]`"):
len(tensor)
numpy_tensor = np.asarray([1.0, 2.0, 3.0])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]])
tensor = constant_op.constant(numpy_tensor)
self.assertAllEqual(len(numpy_tensor), len(tensor))
def testCopy(self):
t = constant_op.constant(1.0)
tt = copy.copy(t)
self.assertAllEqual(tt, 1.0)
del tt
tt = copy.deepcopy(t)
self.assertAllEqual(tt, 1.0)
del tt
self.assertAllEqual(t, 1.0)
def testConstantDtype(self):
self.assertEqual(
constant_op.constant(1, dtype=np.int64).dtype, dtypes.int64)
def testTensorAndNumpyMatrix(self):
expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]])
self.assertAllEqual(expected, actual)
self.assertEqual(np.float32, actual.dtype)
self.assertEqual(dtypes.float32, actual.dtype)
self.assertAllEqual([2, 2], actual.shape.as_list())
def testNumpyArrayInterface(self):
class ArrayAsArrayInterface:
"""Simple class that wraps an np.array as an __array_interface__."""
def __init__(self, array):
self.array = array
@property
def __array_interface__(self):
return self.array.__array_interface__
expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)
array_interface = ArrayAsArrayInterface(expected)
actual = _create_tensor(array_interface)
self.assertAllEqual(expected, actual)
def testFloatDowncast(self):
# Unless explicitly specified, float64->float32
t = _create_tensor(3.0)
self.assertEqual(dtypes.float32, t.dtype)
t = _create_tensor(3.0, dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t.dtype)
def testBool(self):
self.assertFalse(bool(_create_tensor(False)))
self.assertFalse(bool(_create_tensor([False])))
self.assertFalse(bool(_create_tensor([[False]])))
self.assertFalse(bool(_create_tensor([0])))
self.assertFalse(bool(_create_tensor([0.])))
self.assertTrue(bool(_create_tensor([1])))
self.assertTrue(bool(_create_tensor([1.])))
def testIndex(self):
self.assertEqual([42][_create_tensor(0)], 42)
with self.assertRaises(TypeError):
_ = [42][_create_tensor([0])]
def testIntDowncast(self):
t = _create_tensor(3)
self.assertEqual(dtypes.int32, t.dtype)
t = _create_tensor(3, dtype=dtypes.int64)
self.assertEqual(dtypes.int64, t.dtype)
t = _create_tensor(2**33)
self.assertEqual(dtypes.int64, t.dtype)
def testTensorCreationFailure(self):
with self.assertRaises(ValueError):
# Should fail because the each row of the Python object has a different
# number of columns.
self.assertEqual(None, _create_tensor([[1], [1, 2]]))
def testMultiLineTensorStr(self):
t = _create_tensor(np.eye(3))
tensor_str = str(t)
self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str)
self.assertIn(str(t), tensor_str)
def testMultiLineTensorRepr(self):
t = _create_tensor(np.eye(3))
tensor_repr = repr(t)
self.assertTrue(tensor_repr.startswith("<"))
self.assertTrue(tensor_repr.endswith(">"))
self.assertIn(
"shape=%s, dtype=%s, numpy=\n%r" % (t.shape, t.dtype.name, t.numpy()),
tensor_repr)
def testTensorStrReprObeyNumpyPrintOptions(self):
orig_threshold = np.get_printoptions()["threshold"]
orig_edgeitems = np.get_printoptions()["edgeitems"]
np.set_printoptions(threshold=2, edgeitems=1)
t = _create_tensor(np.arange(10, dtype=np.int32))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t)))
self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t)))
# Clean up: reset to previous printoptions.
np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
def testZeroDimTensorStr(self):
t = _create_tensor(42)
self.assertIn("42, shape=(), dtype=int32", str(t))
def testZeroDimTensorRepr(self):
t = _create_tensor(42)
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("shape=(), dtype=int32, numpy=42", repr(t))
def testZeroSizeTensorStr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertIn("[], shape=(0,), dtype=float32", str(t))
def testZeroSizeTensorRepr(self):
t = _create_tensor(np.zeros(0, dtype=np.float32))
self.assertTrue(repr(t).startswith("<"))
self.assertTrue(repr(t).endswith(">"))
self.assertIn("shape=(0,), dtype=float32, numpy=%r" % t.numpy(), repr(t))
def testStringTensor(self):
t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]])
t = _create_tensor(t_np_orig)
t_np = t.numpy()
self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig))
def testIterateOverTensor(self):
l = [[1, 2], [3, 4]]
t = _create_tensor(l)
for list_element, tensor_element in zip(l, t):
self.assertAllEqual(list_element, tensor_element.numpy())
def testIterateOverScalarTensorRaises(self):
t = _create_tensor(1)
with self.assertRaisesRegex(TypeError,
"Cannot iterate over a scalar tensor"):
iter(t)
@test_util.run_gpu_only
def testStringTensorOnGPU(self):
with ops.device("/device:GPU:0"):
t = _create_tensor("test string")
self.assertIn("GPU", t.device)
def testInvalidUTF8ProducesReasonableError(self):
if sys.version_info[0] < 3:
self.skipTest("Test is only valid in python3.")
with self.assertRaises(UnicodeDecodeError):
io_ops.read_file(b"\xff")
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferredDtypeIsRespected(self):
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.int32).dtype,
dtypes.float32)
self.assertEqual(
ops.convert_to_tensor(0.5, preferred_dtype=dtypes.float64).dtype,
dtypes.float64)
@test_util.run_in_graph_and_eager_modes
def testCompatibility(self):
integer_types = [
dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8,
dtypes.uint16, dtypes.uint32, dtypes.uint64
]
# Floats are not compatible with ints
for t in integer_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# Ints compatible with floats
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float16)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float32)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.float64)), 5.0)
self.assertEqual(
self.evaluate(constant_op.constant(5, dtype=dtypes.bfloat16)), 5.0)
# Ints and floats are compatible with complex types
self.assertEqual(
constant_op.constant([[1.0]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
self.assertEqual(
constant_op.constant([[1]], dtype=dtypes.complex128).dtype,
dtypes.complex128)
# Quantized types are not compatible with floats
quantized_types = [
dtypes.qint16, dtypes.qint32, dtypes.qint8, dtypes.quint16,
dtypes.quint8
]
for t in quantized_types:
with self.assertRaises(TypeError):
constant_op.constant(0.5, dtype=t)
# TODO(b/118402529): quantized types are broken in eager.
@test_util.run_in_graph_and_eager_modes
def testCConvertToTensor(self):
with self.assertRaises(TypeError):
_ = constant_op.constant(0) < 0.5
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorAllowsOverflow(self):
_ = ops.convert_to_tensor(123456789, dtype=dtypes.uint8)
@test_util.run_in_graph_and_eager_modes
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testConvertToTensorNumpyZeroDim(self):
for np_type, dtype in [(np.int32, dtypes.int32), (np.half, dtypes.half),
(np.float32, dtypes.float32)]:
x = ops.convert_to_tensor(
[np.array(65, dtype=np_type),
np.array(16, dtype=np_type)])
self.assertEqual(x.dtype, dtype)
self.assertAllEqual(x, [65, 16])
@test_util.run_in_graph_and_eager_modes
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testConvertToTensorNumpyScalar(self):
x = ops.convert_to_tensor([
np.array(321, dtype=np.int64).item(),
np.array(16, dtype=np.int64).item()
])
self.assertAllEqual(x, [321, 16])
def testEagerTensorError(self):
with self.assertRaisesRegex(TypeError,
"Cannot convert .* to EagerTensor of dtype .*"):
_ = ops.convert_to_tensor(1., dtype=dtypes.int32)
def testEagerLargeConstant(self):
for t in [dtypes.uint64, dtypes.uint32, dtypes.int32, dtypes.int64]:
self.assertEqual(constant_op.constant(t.max, dtype=t).numpy(), t.max)
self.assertEqual(constant_op.constant(t.min, dtype=t).numpy(), t.min)
def test_numpyIsView(self):
with ops.device("CPU"):
t = constant_op.constant([0.0])
t._numpy()[0] = 42.0
self.assertAllClose(t, constant_op.constant([42.0]))
def test_numpyFailsForResource(self):
v = variables.Variable(42)
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Cannot convert .+ resource"):
v._handle._numpy()
def test_numpyFailsForVariant(self):
variant_t = list_ops.tensor_list_reserve(
element_shape=[], num_elements=1, element_dtype=dtypes.float32)
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Cannot convert .+ variant"):
variant_t._numpy()
def testMemoryviewFailsForResource(self):
v = variables.Variable(42)
with self.assertRaisesRegex(BufferError, "Cannot convert .+ resource"):
np.asarray(memoryview(v._handle))
def testMemoryviewFailsForVariant(self):
variant_t = list_ops.tensor_list_reserve(
element_shape=[], num_elements=1, element_dtype=dtypes.float32)
with self.assertRaisesRegex(BufferError, "Cannot convert .+ variant"):
np.asarray(memoryview(variant_t))
def testMemoryviewIsReadonly(self):
t = constant_op.constant([0.0])
self.assertTrue(memoryview(t).readonly)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testMemoryviewScalar(self):
t = constant_op.constant(42.0)
self.assertAllEqual(
np.array(memoryview(t)), np.array(42.0, dtype=np.float32))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testMemoryviewEmpty(self):
t = constant_op.constant([], dtype=np.float32)
self.assertAllEqual(np.array(memoryview(t)), np.array([]))
@test_util.run_gpu_only
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testMemoryviewCopyToCPU(self):
with ops.device("/device:GPU:0"):
t = constant_op.constant([0.0])
self.assertAllEqual(
np.array(memoryview(t)), np.array([0.0], dtype=np.float32))
@test_util.disable_tfrt("b/169877776: ResourceVariable is not initialized "
"properly in TFRT")
def testResourceTensorCopy(self):
if not test_util.is_gpu_available():
self.skipTest("GPU only")
with ops.device("GPU:0"):
v = resource_variable_ops.ResourceVariable(1.)
read_handle_on_gpu = resource_variable_ops.read_variable_op(
v.handle, dtypes.float32)
handle_on_cpu = v.handle.cpu()
read_handle_on_cpu = resource_variable_ops.read_variable_op(
handle_on_cpu, dtypes.float32)
self.assertAllEqual(read_handle_on_cpu, read_handle_on_gpu)
def testEagerTensorFormat(self):
t = array_ops.constant(1)
self.assertEqual(f"{t}", "1")
self.assertEqual(str(t), "tf.Tensor(1, shape=(), dtype=int32)")
self.assertEqual(f"{t!s}", "tf.Tensor(1, shape=(), dtype=int32)")
self.assertEqual(repr(t), "<tf.Tensor: shape=(), dtype=int32, numpy=1>")
self.assertEqual(f"{t!r}", "<tf.Tensor: shape=(), dtype=int32, numpy=1>")
def testEagerTensorFormatForResource(self):
t = resource_variable_ops.VarHandleOp(shape=[], dtype=dtypes.float32)
# type is compiler-dependent, as it comes from demangling.
handle_str = (f"<ResourceHandle("
f"name=\"\", "
f"device=\"{t.device}\", "
f"container=\"localhost\", "
f"type=\"@@tensorflow@@Var@@\")>")
def make_regex(s):
return re.escape(s).replace("@@", ".*")
self.assertRegex(f"{t}", make_regex(handle_str))
self.assertRegex(
str(t),
make_regex(f"tf.Tensor({handle_str}, shape=(), dtype=resource)"))
self.assertRegex(
f"{t!s}",
make_regex(f"tf.Tensor({handle_str}, shape=(), dtype=resource)"))
self.assertRegex(
repr(t),
make_regex(
f"<tf.Tensor: shape=(), dtype=resource, value={handle_str}>"))
self.assertRegex(
f"{t!r}",
make_regex(
f"<tf.Tensor: shape=(), dtype=resource, value={handle_str}>"))
def testEagerTensorFormatForVariant(self):
t = list_ops.tensor_list_reserve(
element_shape=[1], num_elements=1, element_dtype=dtypes.float32)
self.assertEqual(f"{t}", "<TensorList>")
self.assertEqual(str(t), "tf.Tensor(<TensorList>, shape=(), dtype=variant)")
self.assertEqual(f"{t!s}",
"tf.Tensor(<TensorList>, shape=(), dtype=variant)")
self.assertEqual(
repr(t), "<tf.Tensor: shape=(), dtype=variant, value=<TensorList>>")
self.assertEqual(
f"{t!r}", "<tf.Tensor: shape=(), dtype=variant, value=<TensorList>>")
def testNumpyTooManyDimensions(self):
max_dims = 64 if np.lib.NumpyVersion(np.__version__) >= "2.0.0.dev0" else 32
t = constant_op.constant(1., shape=[1] * (max_dims + 1))
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Cannot convert tensor with %d dimensions to NumPy array. NumPy arrays "
"can have at most %d dimensions"% (max_dims + 1, max_dims)):
t.numpy()
def testNumpyDimsTooBig(self):
# Creating a Numpy array fails in some cases if the product of non-zero
# dimensions is very big, even if the shape also has a zero in it.
t = array_ops.ones((0, 2**31, 2**31))
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Failed to create numpy array from tensor of shape "
r"\[0, 2147483648, 2147483648\]. Numpy error.*array is too big"):
t.numpy()
| TFETensorTest |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/supporting_types.py | {
"start": 411,
"end": 645
} | class ____(str, Enum):
"""The four levels of maturity for features within Great Expectations"""
CONCEPT_ONLY = "CONCEPT_ONLY"
EXPERIMENTAL = "EXPERIMENTAL"
BETA = "BETA"
PRODUCTION = "PRODUCTION"
@dataclass
| Maturity |
python | airbytehq__airbyte | airbyte-ci/connectors/erd/src/erd/dbml_assembler.py | {
"start": 3228,
"end": 9140
} | class ____:
def assemble(
self,
source: Source,
discovered_catalog: AirbyteCatalog,
relationships: Relationships,
) -> Database:
database = Database()
for stream in discovered_catalog.streams:
if source.is_dynamic(stream.name):
print(f"Skipping stream {stream.name} as it is dynamic")
continue
database.add(self._create_table(stream))
self._add_references(source, database, relationships)
return database
def _create_table(self, stream: AirbyteStream) -> Table:
dbml_table = Table(stream.name)
for property_name, property_information in stream.json_schema.get(
"properties"
).items():
try:
dbml_table.add_column(
Column(
name=property_name,
type=self._extract_type(property_information["type"]),
pk=self._is_pk(stream, property_name),
)
)
except (KeyError, ValueError) as exception:
print(f"Ignoring field {property_name}: {exception}")
continue
if (
stream.source_defined_primary_key
and len(stream.source_defined_primary_key) > 1
):
if any(map(lambda key: len(key) != 1, stream.source_defined_primary_key)):
raise ValueError(
f"Does not support nested key as part of primary key `{stream.source_defined_primary_key}`"
)
composite_key_columns = [
column
for key in stream.source_defined_primary_key
for column in dbml_table.columns
if column.name in key
]
if len(composite_key_columns) < len(stream.source_defined_primary_key):
raise ValueError("Unexpected error: missing PK column from dbml table")
dbml_table.add_index(
Index(
subjects=composite_key_columns,
pk=True,
)
)
return dbml_table
def _add_references(
self, source: Source, database: Database, relationships: Relationships
) -> None:
for stream in relationships["streams"]:
for column_name, relationship in stream["relations"].items():
if source.is_dynamic(stream["name"]):
print(
f"Skipping relationship as stream {stream['name']} from relationship is dynamic"
)
continue
try:
target_table_name, target_column_name = relationship.split(
".", 1
) # we support the field names having dots but not stream name hence we split on the first dot only
except ValueError as exception:
raise ValueError(
f"Could not handle relationship {relationship}"
) from exception
if source.is_dynamic(target_table_name):
print(
f"Skipping relationship as target stream {target_table_name} is dynamic"
)
continue
try:
database.add_reference(
Reference(
type="<>", # we don't have the information of which relationship type it is so we assume many-to-many for now
col1=self._get_column(
database, stream["name"], column_name
),
col2=self._get_column(
database, target_table_name, target_column_name
),
)
)
except ValueError as exception:
print(f"Skipping relationship: {exception}")
def _extract_type(self, property_type: Union[str, List[str]]) -> str:
if isinstance(property_type, str):
return property_type
types = list(property_type)
if "null" in types:
# As we flag everything as nullable (except PK and cursor field), there is little value in keeping the information in order to
# show this in DBML
types.remove("null")
if len(types) != 1:
raise ValueError(
f"Expected only one type apart from `null` but got {len(types)}: {property_type}"
)
return types[0]
def _is_pk(self, stream: AirbyteStream, property_name: str) -> bool:
return stream.source_defined_primary_key == [[property_name]]
def _get_column(
self, database: Database, table_name: str, column_name: str
) -> Column:
matching_tables = list(
filter(lambda dbml_table: dbml_table.name == table_name, database.tables)
)
if len(matching_tables) == 0:
raise ValueError(f"Could not find table {table_name}")
elif len(matching_tables) > 1:
raise ValueError(
f"Unexpected error: many tables found with name {table_name}"
)
table: Table = matching_tables[0]
matching_columns = list(
filter(lambda column: column.name == column_name, table.columns)
)
if len(matching_columns) == 0:
raise ValueError(
f"Could not find column {column_name} in table {table_name}. Columns are: {table.columns}"
)
elif len(matching_columns) > 1:
raise ValueError(
f"Unexpected error: many columns found with name {column_name} for table {table_name}"
)
return matching_columns[0]
| DbmlAssembler |
python | dagster-io__dagster | scripts/gen_airbyte_classes.py | {
"start": 1241,
"end": 2845
} | class ____(ABC):
"""Corresponds to the Python type of a field in a schema, has methods to generate
the Python code to annotate that type or check it at runtime.
"""
description: Optional[str] = None
@abstractmethod
def get_check(self, name: str, scope: Optional[str] = None) -> str:
"""Returns the dagster._check check for this type, e.g. check.str_param(name, 'name')."""
@abstractmethod
def annotation(
self, scope: Optional[str] = None, quote: bool = False, hide_default: bool = False
) -> str:
"""Returns the Python type annotation for this type, e.g. str or Union[str, int]."""
@property
def const_value(self) -> object:
"""If this is a constant field, returns the constant value, otherwise returns None."""
return None
def add_description(self, description: str) -> None:
if not description:
return
self.description = description.replace("\n", " ")
def get_doc_desc(self, name: str, scope: Optional[str] = None) -> Optional[str]:
if not self.description:
return None
formatted_desc = (
f"{name} ({self.annotation(hide_default=True, scope=scope)}): {self.description}"
)
desc_escaped_trailing_underscores = re.sub(
r"_([^a-zA-Z0-9_])",
r"\\_\1",
formatted_desc,
)
desc_escaped_backslashes = desc_escaped_trailing_underscores.replace("\\", "\\\\")
desc_removed_tags = re.sub("<[^<]+?>", "", desc_escaped_backslashes)
return desc_removed_tags
| SchemaType |
python | docker__docker-py | docker/types/containers.py | {
"start": 571,
"end": 2925
} | class ____(DictType):
"""
Configure logging for a container, when provided as an argument to
:py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
You may refer to the
`official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
for more information.
Args:
type (str): Indicate which log driver to use. A set of valid drivers
is provided as part of the :py:attr:`LogConfig.types`
enum. Other values may be accepted depending on the engine version
and available logging plugins.
config (dict): A driver-dependent configuration dictionary. Please
refer to the driver's documentation for a list of valid config
keys.
Example:
>>> from docker.types import LogConfig
>>> lc = LogConfig(type=LogConfig.types.JSON, config={
... 'max-size': '1g',
... 'labels': 'production_status,geo'
... })
>>> hc = client.create_host_config(log_config=lc)
>>> container = client.create_container('busybox', 'true',
... host_config=hc)
>>> client.inspect_container(container)['HostConfig']['LogConfig']
{
'Type': 'json-file',
'Config': {'labels': 'production_status,geo', 'max-size': '1g'}
}
"""
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super().__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
""" Set a the value for ``key`` to ``value`` inside the ``config``
dict.
"""
self.config[key] = value
def unset_config(self, key):
""" Remove the ``key`` property from the ``config`` dict. """
if key in self.config:
del self.config[key]
| LogConfig |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 76602,
"end": 109759
} | class ____:
@pytest.fixture
def relative_file_path(self):
return Path(__file__).relative_to(Path.cwd())
@pytest.fixture
def dummy_flow_1_entrypoint(self, relative_file_path):
return f"{relative_file_path}:dummy_flow_1"
@pytest.mark.parametrize(
"dummy_flow, flow_name, entrypoint_suffix",
[
(
dummy_flow_1,
"dummy-flow-1",
"dummy_flow_1",
),
(
ClassNameClassmethod.dummy_flow_classmethod,
"dummy-flow-classmethod",
"ClassNameClassmethod.dummy_flow_classmethod",
),
(
ClassNameStaticmethod.dummy_flow_staticmethod,
"dummy-flow-staticmethod",
"ClassNameStaticmethod.dummy_flow_staticmethod",
),
],
)
def test_from_flow(
self,
dummy_flow: Flow,
flow_name: str,
entrypoint_suffix: str,
relative_file_path: Path,
):
deployment = RunnerDeployment.from_flow(
dummy_flow,
__file__,
tags=["test"],
version="alpha",
version_type=VersionType.SIMPLE,
description="Deployment descriptions",
enforce_parameter_schema=True,
concurrency_limit=42,
)
assert deployment.name == "test_runner"
assert deployment.flow_name == flow_name
assert deployment.entrypoint == f"{relative_file_path}:{entrypoint_suffix}"
assert deployment.description == "Deployment descriptions"
assert deployment.version == "alpha"
assert deployment.version_type == VersionType.SIMPLE
assert deployment.tags == ["test"]
assert deployment.paused is False
assert deployment.enforce_parameter_schema
assert deployment.concurrency_limit == 42
async def test_from_flow_can_produce_a_module_path_entrypoint(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
entrypoint_type=EntrypointType.MODULE_PATH,
)
assert (
deployment.entrypoint
== f"{dummy_flow_1.__module__}.{dummy_flow_1.__name__}"
)
def test_from_flow_accepts_interval(self):
deployment = RunnerDeployment.from_flow(dummy_flow_1, __file__, interval=3600)
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
def test_from_flow_accepts_interval_as_list(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1, __file__, interval=[3600, 7200]
)
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
assert deployment.schedules[1].schedule.interval == datetime.timedelta(
seconds=7200
)
def test_from_flow_accepts_cron(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1, __file__, cron="* * * * *"
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
def test_from_flow_accepts_cron_as_list(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
cron=[
"0 * * * *",
"0 0 1 * *",
"*/10 * * * *",
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "0 * * * *"
assert deployment.schedules[1].schedule.cron == "0 0 1 * *"
assert deployment.schedules[2].schedule.cron == "*/10 * * * *"
def test_from_flow_accepts_rrule(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1, __file__, rrule="FREQ=MINUTELY"
)
assert deployment.schedules
assert deployment.schedules[0].schedule.rrule == "FREQ=MINUTELY"
def test_from_flow_accepts_rrule_as_list(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
rrule=[
"FREQ=DAILY",
"FREQ=WEEKLY",
"FREQ=MONTHLY",
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.rrule == "FREQ=DAILY"
assert deployment.schedules[1].schedule.rrule == "FREQ=WEEKLY"
assert deployment.schedules[2].schedule.rrule == "FREQ=MONTHLY"
def test_from_flow_accepts_schedules(self):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
schedules=[
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
),
IntervalSchedule(interval=datetime.timedelta(days=1)),
{
"schedule": IntervalSchedule(interval=datetime.timedelta(days=2)),
"active": False,
},
Interval(datetime.timedelta(days=3)),
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
assert deployment.schedules[0].active is True
assert deployment.schedules[1].schedule.interval == datetime.timedelta(days=1)
assert deployment.schedules[1].active is True
assert deployment.schedules[2].schedule.interval == datetime.timedelta(days=2)
assert deployment.schedules[2].active is False
assert deployment.schedules[3].schedule.interval == datetime.timedelta(days=3)
assert deployment.schedules[3].active is True
@pytest.mark.parametrize(
"value,expected",
[(True, True), (False, False), (None, False)],
)
def test_from_flow_accepts_paused(self, value, expected):
deployment = RunnerDeployment.from_flow(dummy_flow_1, __file__, paused=value)
assert deployment.paused is expected
async def test_from_flow_accepts_concurrency_limit_config(self):
concurrency_limit_config = ConcurrencyLimitConfig(
limit=42, collision_strategy="CANCEL_NEW"
)
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
concurrency_limit=concurrency_limit_config,
)
assert deployment.concurrency_limit == concurrency_limit_config.limit
assert (
deployment.concurrency_options.collision_strategy
== concurrency_limit_config.collision_strategy
)
@pytest.mark.parametrize(
"kwargs",
[
{**d1, **d2}
for d1, d2 in combinations(
[
{"interval": 3600},
{"cron": "* * * * *"},
{"rrule": "FREQ=MINUTELY"},
{
"schedules": [
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
)
],
},
{"schedule": Cron("* * * * *")},
],
2,
)
],
)
def test_from_flow_raises_on_multiple_schedule_parameters(self, kwargs):
expected_message = (
"Only one of interval, cron, rrule, schedule, or schedules can be provided."
)
with pytest.raises(ValueError, match=expected_message):
RunnerDeployment.from_flow(dummy_flow_1, __file__, **kwargs)
def test_from_flow_uses_defaults_from_flow(self):
deployment = RunnerDeployment.from_flow(dummy_flow_1, __file__)
assert deployment.version == "test"
assert deployment._version_from_flow is True
assert deployment.description == "I'm just here for tests"
def test_from_flow_raises_on_interactively_defined_flow(self):
@flow
def da_flow():
pass
# Clear __module__ to test it's handled correctly
da_flow.__module__ = None
with pytest.raises(
ValueError,
match="Flows defined interactively cannot be deployed.",
):
RunnerDeployment.from_flow(da_flow, __file__)
# muck up __module__ so that it looks like it was defined interactively
da_flow.__module__ = "__not_a_real_module__"
with pytest.raises(
ValueError,
match="Flows defined interactively cannot be deployed.",
):
RunnerDeployment.from_flow(da_flow, __file__)
def test_from_entrypoint(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint,
__file__,
tags=["test"],
version="alpha",
description="Deployment descriptions",
enforce_parameter_schema=True,
)
assert deployment.name == "test_runner"
assert deployment.flow_name == "dummy-flow-1"
assert deployment.entrypoint == "tests/runner/test_runner.py:dummy_flow_1"
assert deployment.description == "Deployment descriptions"
assert deployment.version == "alpha"
assert deployment.tags == ["test"]
assert deployment.enforce_parameter_schema
assert deployment.concurrency_limit is None
def test_from_entrypoint_accepts_interval(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, interval=3600
)
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
def test_from_entrypoint_accepts_interval_as_list(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, interval=[3600, 7200]
)
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
assert deployment.schedules[1].schedule.interval == datetime.timedelta(
seconds=7200
)
def test_from_entrypoint_accepts_cron(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, cron="* * * * *"
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
def test_from_entrypoint_accepts_cron_as_list(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint,
__file__,
cron=[
"0 * * * *",
"0 0 1 * *",
"*/10 * * * *",
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "0 * * * *"
assert deployment.schedules[1].schedule.cron == "0 0 1 * *"
assert deployment.schedules[2].schedule.cron == "*/10 * * * *"
def test_from_entrypoint_accepts_rrule(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, rrule="FREQ=MINUTELY"
)
assert deployment.schedules
assert deployment.schedules[0].schedule.rrule == "FREQ=MINUTELY"
def test_from_entrypoint_accepts_rrule_as_list(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint,
__file__,
rrule=[
"FREQ=DAILY",
"FREQ=WEEKLY",
"FREQ=MONTHLY",
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.rrule == "FREQ=DAILY"
assert deployment.schedules[1].schedule.rrule == "FREQ=WEEKLY"
assert deployment.schedules[2].schedule.rrule == "FREQ=MONTHLY"
def test_from_entrypoint_accepts_schedules(self, dummy_flow_1_entrypoint):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint,
__file__,
schedules=[
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
),
IntervalSchedule(interval=datetime.timedelta(days=1)),
{
"schedule": IntervalSchedule(interval=datetime.timedelta(days=2)),
"active": False,
},
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
assert deployment.schedules[0].active is True
assert deployment.schedules[1].schedule.interval == datetime.timedelta(days=1)
assert deployment.schedules[1].active is True
assert deployment.schedules[2].schedule.interval == datetime.timedelta(days=2)
assert deployment.schedules[2].active is False
async def test_from_entrypoint_accepts_concurrency_limit_config(
self, dummy_flow_1_entrypoint
):
concurrency_limit_config = ConcurrencyLimitConfig(
limit=42, collision_strategy="CANCEL_NEW"
)
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint,
__file__,
concurrency_limit=concurrency_limit_config,
)
assert deployment.concurrency_limit == concurrency_limit_config.limit
assert (
deployment.concurrency_options.collision_strategy
== concurrency_limit_config.collision_strategy
)
@pytest.mark.parametrize(
"value,expected",
[(True, True), (False, False), (None, False)],
)
def test_from_entrypoint_accepts_paused(
self, value, expected, dummy_flow_1_entrypoint
):
deployment = RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, paused=value
)
assert deployment.paused is expected
@pytest.mark.parametrize(
"kwargs",
[
{**d1, **d2}
for d1, d2 in combinations(
[
{"interval": 3600},
{"cron": "* * * * *"},
{"rrule": "FREQ=MINUTELY"},
{
"schedules": [
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
)
]
},
],
2,
)
],
)
def test_from_entrypoint_raises_on_multiple_schedule_parameters(
self, dummy_flow_1_entrypoint, kwargs
):
expected_message = (
"Only one of interval, cron, rrule, schedule, or schedules can be provided."
)
with pytest.raises(ValueError, match=expected_message):
RunnerDeployment.from_entrypoint(
dummy_flow_1_entrypoint, __file__, **kwargs
)
def test_from_entrypoint_uses_defaults_from_entrypoint(
self, dummy_flow_1_entrypoint
):
deployment = RunnerDeployment.from_entrypoint(dummy_flow_1_entrypoint, __file__)
assert deployment.version == "test"
assert deployment.description == "I'm just here for tests"
async def test_apply(self, prefect_client: PrefectClient):
deployment = RunnerDeployment.from_flow(
dummy_flow_1, __file__, interval=3600, version_type=VersionType.SIMPLE
)
deployment_id = await deployment.apply()
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.name == "test_runner"
assert deployment.entrypoint == "tests/runner/test_runner.py:dummy_flow_1"
assert deployment.version == "test"
assert deployment.description == "I'm just here for tests"
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
assert deployment.work_pool_name is None
assert deployment.work_queue_name is None
assert deployment.path == "."
assert deployment.enforce_parameter_schema
assert deployment.job_variables == {}
assert deployment.paused is False
assert deployment.global_concurrency_limit is None
async def test_apply_with_work_pool(
self, prefect_client: PrefectClient, work_pool, process_work_pool
):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
interval=3600,
)
deployment_id = await deployment.apply(
work_pool_name=work_pool.name, image="my-repo/my-image:latest"
)
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.work_pool_name == work_pool.name
assert deployment.job_variables == {
"image": "my-repo/my-image:latest",
}
assert deployment.work_queue_name == "default"
# should result in the same deployment ID
deployment2 = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
interval=3600,
)
deployment_id = await deployment2.apply(work_pool_name=process_work_pool.name)
deployment2 = await prefect_client.read_deployment(deployment_id)
assert deployment2.work_pool_name == process_work_pool.name
# this may look weird with a process pool but update's job isn't to enforce that schema
assert deployment2.job_variables == {
"image": "my-repo/my-image:latest",
}
assert deployment2.work_queue_name == "default"
async def test_apply_with_image(self, prefect_client: PrefectClient, work_pool):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
"test-image",
)
deployment_id = await deployment.apply(
work_pool_name=work_pool.name, image="my-repo/my-image:latest"
)
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.work_pool_name == work_pool.name
assert deployment.job_variables == {
"image": "my-repo/my-image:latest",
}
assert deployment.work_queue_name == "default"
# should result in the same deployment ID
deployment2 = RunnerDeployment.from_flow(
dummy_flow_1,
"test-image",
)
deployment_id = await deployment2.apply(image="my-other-repo/my-image:latest")
deployment2 = await prefect_client.read_deployment(deployment_id)
assert deployment2.work_pool_name == work_pool.name
assert deployment2.job_variables == {
"image": "my-other-repo/my-image:latest",
}
assert deployment2.work_queue_name == "default"
async def test_apply_paused(self, prefect_client: PrefectClient):
deployment = RunnerDeployment.from_flow(
dummy_flow_1, __file__, interval=3600, paused=True
)
deployment_id = await deployment.apply()
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.paused is True
@pytest.mark.parametrize(
"from_flow_kwargs, apply_kwargs, expected_message",
[
(
{"work_queue_name": "my-queue"},
{},
(
"A work queue can only be provided when registering a deployment"
" with a work pool."
),
),
(
{"job_variables": {"foo": "bar"}},
{},
(
"Job variables can only be provided when registering a deployment"
" with a work pool."
),
),
(
{},
{"image": "my-repo/my-image:latest"},
(
"An image can only be provided when registering a deployment with a"
" work pool."
),
),
],
)
async def test_apply_no_work_pool_failures(
self, from_flow_kwargs, apply_kwargs, expected_message
):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
interval=3600,
**from_flow_kwargs,
)
with pytest.raises(
ValueError,
match=expected_message,
):
await deployment.apply(**apply_kwargs)
async def test_apply_raises_on_api_errors(self, work_pool_with_image_variable):
deployment = RunnerDeployment.from_flow(
dummy_flow_1,
__file__,
work_pool_name=work_pool_with_image_variable.name,
job_variables={"image_pull_policy": "blork"},
)
with pytest.raises(
DeploymentApplyError,
match=re.escape(
"Error creating deployment: Validation failed for field 'image_pull_policy'. Failure reason: 'blork' is not one of"
" ['IfNotPresent', 'Always', 'Never']"
),
):
await deployment.apply()
def test_create_runner_deployment_from_storage(self, temp_storage: MockStorage):
concurrency_limit_config = ConcurrencyLimitConfig(
limit=42, collision_strategy="CANCEL_NEW"
)
deployment = RunnerDeployment.from_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
interval=datetime.timedelta(seconds=30),
description="Test Deployment Description",
tags=["tag1", "tag2"],
version="1.0.0",
version_type=VersionType.SIMPLE,
enforce_parameter_schema=True,
concurrency_limit=concurrency_limit_config,
)
assert isinstance(deployment, RunnerDeployment)
# Verify the created RunnerDeployment's attributes
assert deployment.name == "test-deployment"
assert deployment.flow_name == "test-flow"
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=30
)
assert deployment.tags == ["tag1", "tag2"]
assert deployment.version == "1.0.0"
assert deployment.version_type == VersionType.SIMPLE
assert deployment.description == "Test Deployment Description"
assert deployment.enforce_parameter_schema is True
assert deployment.concurrency_limit == concurrency_limit_config.limit
assert (
deployment.concurrency_options.collision_strategy
== concurrency_limit_config.collision_strategy
)
assert deployment._path
assert "$STORAGE_BASE_PATH" in deployment._path
assert deployment.entrypoint == "flows.py:test_flow"
assert deployment.storage == temp_storage
async def test_create_runner_deployment_from_storage_async(
self, temp_storage: MockStorage
):
concurrency_limit_config = ConcurrencyLimitConfig(
limit=42, collision_strategy="CANCEL_NEW"
)
deployment = await RunnerDeployment.afrom_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
interval=datetime.timedelta(seconds=30),
description="Test Deployment Description",
tags=["tag1", "tag2"],
version="1.0.0",
version_type=VersionType.SIMPLE,
enforce_parameter_schema=True,
concurrency_limit=concurrency_limit_config,
)
# Verify the created RunnerDeployment's attributes
assert deployment.name == "test-deployment"
assert deployment.flow_name == "test-flow"
assert deployment.schedules
assert deployment.schedules[0].schedule.interval == datetime.timedelta(
seconds=30
)
assert deployment.tags == ["tag1", "tag2"]
assert deployment.version == "1.0.0"
assert deployment.version_type == VersionType.SIMPLE
assert deployment.description == "Test Deployment Description"
assert deployment.enforce_parameter_schema is True
assert deployment.concurrency_limit == concurrency_limit_config.limit
assert (
deployment.concurrency_options.collision_strategy
== concurrency_limit_config.collision_strategy
)
assert deployment._path
assert "$STORAGE_BASE_PATH" in deployment._path
assert deployment.entrypoint == "flows.py:test_flow"
assert deployment.storage == temp_storage
def test_from_storage_accepts_schedules(self, temp_storage: MockStorage):
deployment = RunnerDeployment.from_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
schedules=[
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
),
IntervalSchedule(interval=datetime.timedelta(days=1)),
{
"schedule": IntervalSchedule(interval=datetime.timedelta(days=2)),
"active": False,
},
Interval(datetime.timedelta(days=3)),
],
)
assert isinstance(deployment, RunnerDeployment)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
assert deployment.schedules[0].active is True
assert deployment.schedules[1].schedule.interval == datetime.timedelta(days=1)
assert deployment.schedules[1].active is True
assert deployment.schedules[2].schedule.interval == datetime.timedelta(days=2)
assert deployment.schedules[2].active is False
assert deployment.schedules[3].schedule.interval == datetime.timedelta(days=3)
assert deployment.schedules[3].active is True
async def test_afrom_storage_accepts_schedules(self, temp_storage: MockStorage):
deployment = await RunnerDeployment.afrom_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
schedules=[
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
),
IntervalSchedule(interval=datetime.timedelta(days=1)),
{
"schedule": IntervalSchedule(interval=datetime.timedelta(days=2)),
"active": False,
},
Interval(datetime.timedelta(days=3)),
],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
assert deployment.schedules[0].active is True
assert deployment.schedules[1].schedule.interval == datetime.timedelta(days=1)
assert deployment.schedules[1].active is True
assert deployment.schedules[2].schedule.interval == datetime.timedelta(days=2)
assert deployment.schedules[2].active is False
assert deployment.schedules[3].schedule.interval == datetime.timedelta(days=3)
assert deployment.schedules[3].active is True
@pytest.mark.parametrize(
"value,expected",
[(True, True), (False, False), (None, False)],
)
def test_from_storage_accepts_paused(
self, value: Union[bool, None], expected: bool, temp_storage: MockStorage
):
deployment = RunnerDeployment.from_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
paused=value,
)
assert isinstance(deployment, RunnerDeployment)
assert deployment.paused is expected
@pytest.mark.parametrize(
"value,expected",
[(True, True), (False, False), (None, False)],
)
async def test_afrom_storage_accepts_paused(
self, value: Union[bool, None], expected: bool, temp_storage: MockStorage
):
deployment = await RunnerDeployment.afrom_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="test-deployment",
paused=value,
)
assert deployment.paused is expected
async def test_init_runner_deployment_with_schedules(self):
schedule = CronSchedule(cron="* * * * *")
deployment = RunnerDeployment(
flow=dummy_flow_1,
name="test-deployment",
schedules=[schedule],
)
assert deployment.schedules
assert deployment.schedules[0].schedule.cron == "* * * * *"
assert deployment.schedules[0].active is True
async def test_init_runner_deployment_with_invalid_schedules(self):
with pytest.raises(ValueError, match="Invalid schedule"):
RunnerDeployment(
flow=dummy_flow_1,
name="test-deployment",
schedules=[
"not a schedule",
],
)
async def test_deployment_name_with_dots(self):
# regression test for https://github.com/PrefectHQ/prefect/issues/16551
deployment = RunnerDeployment.from_flow(dummy_flow_1, name="..test-deployment")
assert deployment.name == "..test-deployment"
deployment2 = RunnerDeployment.from_flow(
dummy_flow_1, name="flow-from-my.python.module"
)
assert deployment2.name == "flow-from-my.python.module"
def test_deployment_name_with_dots_from_storage(self, temp_storage: MockStorage):
# regression test for deployment name truncation in from_storage
# names with dots like "v2.0.1" should not be truncated to "v2"
deployment = RunnerDeployment.from_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="pricing-subflow-v2.0.1",
)
assert deployment.name == "pricing-subflow-v2.0.1"
# test with multiple dots
deployment2 = RunnerDeployment.from_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="my-flow-v1.2.3.4",
)
assert deployment2.name == "my-flow-v1.2.3.4"
async def test_deployment_name_with_dots_from_storage_async(
self, temp_storage: MockStorage
):
# regression test for deployment name truncation in afrom_storage
deployment = await RunnerDeployment.afrom_storage(
storage=temp_storage,
entrypoint="flows.py:test_flow",
name="pricing-subflow-v2.0.1",
)
assert deployment.name == "pricing-subflow-v2.0.1"
async def test_from_flow_with_frozen_parameters(
self, prefect_client: PrefectClient
):
"""Test that frozen parameters are properly handled in deployment creation."""
@flow
def dummy_flow_4(value: Any): ...
deployment_object = RunnerDeployment.from_flow(
dummy_flow_4,
__file__,
parameters={"value": freeze("test")},
)
assert deployment_object.parameters == {"value": "test"}
deployment_id = await deployment_object.apply()
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.parameters == {"value": "test"}
assert (
deployment.parameter_openapi_schema["properties"]["value"]["readOnly"]
is True
)
assert deployment.parameter_openapi_schema["properties"]["value"]["enum"] == [
"test"
]
async def test_from_flow_with_frozen_parameters_preserves_type(
self, prefect_client: PrefectClient
):
"""Test that frozen parameters preserve their type information."""
@flow
def dummy_flow_5(number: int): ...
deployment_object = RunnerDeployment.from_flow(
dummy_flow_5,
__file__,
parameters={"number": freeze(42)},
)
assert deployment_object.parameters == {"number": 42}
deployment_id = await deployment_object.apply()
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.parameters == {"number": 42}
assert (
deployment.parameter_openapi_schema["properties"]["number"]["type"]
== "integer"
)
assert (
deployment.parameter_openapi_schema["properties"]["number"]["readOnly"]
is True
)
assert deployment.parameter_openapi_schema["properties"]["number"]["enum"] == [
42
]
| TestRunnerDeployment |
python | getsentry__sentry | src/sentry/workflow_engine/processors/data_condition.py | {
"start": 123,
"end": 655
} | class ____(NamedTuple):
fast: list[DataCondition]
slow: list[DataCondition]
def split_conditions_by_speed(
conditions: list[DataCondition],
) -> SplitConditions:
fast_conditions: list[DataCondition] = []
slow_conditions: list[DataCondition] = []
for condition in conditions:
if is_slow_condition(condition):
slow_conditions.append(condition)
else:
fast_conditions.append(condition)
return SplitConditions(fast=fast_conditions, slow=slow_conditions)
| SplitConditions |
python | celery__celery | t/unit/utils/test_platforms.py | {
"start": 5224,
"end": 5575
} | class ____:
def test_call(self):
set_pdeathsig('SIGKILL')
@t.skip.if_win32
def test_call_with_correct_parameter(self):
with patch('celery.platforms._set_pdeathsig') as _set_pdeathsig:
set_pdeathsig('SIGKILL')
_set_pdeathsig.assert_called_once_with(signal.SIGKILL)
@t.skip.if_win32
| test_set_pdeathsig |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 10232,
"end": 10913
} | class ____(DBConnectionPool):
__test__ = False # so that nose doesn't try to execute this directly
def create_pool(self, min_size=0, max_size=1, max_idle=10, max_age=10,
connect_timeout=0.5, module=None):
if module is None:
module = self._dbmodule
return db_pool.TpooledConnectionPool(
module,
min_size=min_size, max_size=max_size,
max_idle=max_idle, max_age=max_age,
connect_timeout=connect_timeout,
**self._auth)
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
eventlet.tpool.killall()
| TpoolConnectionPool |
python | scipy__scipy | benchmarks/benchmarks/stats_sampling.py | {
"start": 7908,
"end": 8814
} | class ____(Benchmark):
param_names = ['distribution']
params = [
# a subset of discrete distributions with finite domain.
[['nhypergeom', (20, 7, 1)],
['hypergeom', (30, 12, 6)],
['nchypergeom_wallenius', (140, 80, 60, 0.5)],
['binom', (5, 0.4)]]
]
def setup(self, distribution):
distname, params = distribution
dist = getattr(stats, distname)
domain = dist.support(*params)
self.urng = np.random.default_rng(0x2fc9eb71cd5120352fa31b7a048aa867)
x = np.arange(domain[0], domain[1] + 1)
self.pv = dist.pmf(x, *params)
self.rng = sampling.DiscreteGuideTable(self.pv, random_state=self.urng)
def time_dgt_setup(self, distribution):
sampling.DiscreteGuideTable(self.pv, random_state=self.urng)
def time_dgt_rvs(self, distribution):
self.rng.rvs(100000)
| DiscreteGuideTable |
python | nedbat__coveragepy | tests/test_testing.py | {
"start": 9325,
"end": 10835
} | class ____(CoverageTest):
"""Tests of the failure assertions in check_coverage."""
CODE = """\
a, b = 1, 1
def oops(x):
if x % 2:
raise Exception("odd")
try:
a = 6
oops(1)
a = 8
except:
b = 10
assert a == 6 and b == 10
"""
BRANCHZ = "34 3-2"
BRANCHZ_MISSING = "3-2"
def test_check_coverage_possible_branches(self) -> None:
msg = "Wrong possible branches: [(7, -2), (7, 4)] != [(3, -2), (3, 4)]"
with pytest.raises(AssertionError, match=re.escape(msg)):
self.check_coverage(
self.CODE,
branchz=self.BRANCHZ.replace("3", "7"),
branchz_missing=self.BRANCHZ_MISSING,
)
def test_check_coverage_missing_branches(self) -> None:
msg = "Wrong missing branches: [(3, 4)] != [(3, -2)]"
with pytest.raises(AssertionError, match=re.escape(msg)):
self.check_coverage(
self.CODE,
branchz=self.BRANCHZ,
branchz_missing="34",
)
def test_check_coverage_mismatched_missing_branches(self) -> None:
msg = "branches_missing = [(1, 2)], has non-branches in it."
with pytest.raises(AssertionError, match=re.escape(msg)):
self.check_coverage(
self.CODE,
branchz=self.BRANCHZ,
branchz_missing="12",
)
| CheckCoverageTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/gcs.py | {
"start": 5726,
"end": 66276
} | class ____(GoogleBaseHook):
"""Use the Google Cloud connection to interact with Google Cloud Storage."""
_conn: storage.Client | None = None
def get_conn(self) -> storage.Client:
"""Return a Google Cloud Storage service object."""
if not self._conn:
self._conn = storage.Client(
credentials=self.get_credentials(), client_info=CLIENT_INFO, project=self.project_id
)
return self._conn
def copy(
self,
source_bucket: str,
source_object: str,
destination_bucket: str | None = None,
destination_object: str | None = None,
) -> None:
"""
Copy an object from a bucket to another, with renaming if requested.
destination_bucket or destination_object can be omitted, in which case
source bucket/object is used, but not both.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
:param destination_bucket: The destination of the object to copied to.
Can be omitted; then the same bucket is used.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_bucket = destination_bucket or source_bucket
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f"Either source/destination bucket or source/destination object must be different, "
f"not both the same: bucket={source_bucket}, object={source_object}"
)
if not source_bucket or not source_object:
raise ValueError("source_bucket and source_object cannot be empty.")
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
destination_object = source_bucket.copy_blob( # type: ignore[attr-defined]
blob=source_object, destination_bucket=destination_bucket, new_name=destination_object
)
get_hook_lineage_collector().add_input_asset(
context=self,
scheme="gs",
asset_kwargs={"bucket": source_bucket.name, "key": source_object.name}, # type: ignore[attr-defined]
)
get_hook_lineage_collector().add_output_asset(
context=self,
scheme="gs",
asset_kwargs={"bucket": destination_bucket.name, "key": destination_object.name}, # type: ignore[union-attr]
)
self.log.info(
"Object %s in bucket %s copied to object %s in bucket %s",
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object.name, # type: ignore[union-attr]
destination_bucket.name, # type: ignore[union-attr]
)
def rewrite(
self,
source_bucket: str,
source_object: str,
destination_bucket: str,
destination_object: str | None = None,
) -> None:
"""
Similar to copy; supports files over 5 TB, and copying between locations and/or storage classes.
destination_object can be omitted, in which case source_object is used.
:param source_bucket: The bucket of the object to copy from.
:param source_object: The object to copy.
:param destination_bucket: The destination of the object to copied to.
:param destination_object: The (renamed) path of the object if given.
Can be omitted; then the same name is used.
"""
destination_object = destination_object or source_object
if source_bucket == destination_bucket and source_object == destination_object:
raise ValueError(
f"Either source/destination bucket or source/destination object must be different, "
f"not both the same: bucket={source_bucket}, object={source_object}"
)
if not source_bucket or not source_object:
raise ValueError("source_bucket and source_object cannot be empty.")
client = self.get_conn()
source_bucket = client.bucket(source_bucket)
source_object = source_bucket.blob(blob_name=source_object) # type: ignore[attr-defined]
destination_bucket = client.bucket(destination_bucket)
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object)
self.log.info("Total Bytes: %s | Bytes Written: %s", total_bytes, bytes_rewritten)
while token is not None:
token, bytes_rewritten, total_bytes = destination_bucket.blob( # type: ignore[attr-defined]
blob_name=destination_object
).rewrite(source=source_object, token=token)
self.log.info("Total Bytes: %s | Bytes Written: %s", total_bytes, bytes_rewritten)
get_hook_lineage_collector().add_input_asset(
context=self,
scheme="gs",
asset_kwargs={"bucket": source_bucket.name, "key": source_object.name}, # type: ignore[attr-defined]
)
get_hook_lineage_collector().add_output_asset(
context=self,
scheme="gs",
asset_kwargs={"bucket": destination_bucket.name, "key": destination_object}, # type: ignore[attr-defined]
)
self.log.info(
"Object %s in bucket %s rewritten to object %s in bucket %s",
source_object.name, # type: ignore[attr-defined]
source_bucket.name, # type: ignore[attr-defined]
destination_object,
destination_bucket.name, # type: ignore[attr-defined]
)
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: None = None,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
user_project: str | None = None,
) -> bytes: ...
@overload
def download(
self,
bucket_name: str,
object_name: str,
filename: str,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
user_project: str | None = None,
) -> str: ...
def download(
self,
bucket_name: str,
object_name: str,
filename: str | None = None,
chunk_size: int | None = None,
timeout: int | None = DEFAULT_TIMEOUT,
num_max_attempts: int | None = 1,
user_project: str | None = None,
) -> str | bytes:
"""
Download a file from Google Cloud Storage.
When no filename is supplied, the operator loads the file into memory and returns its
content. When a filename is supplied, it writes the file to the specified location and
returns the location. For file sizes that exceed the available memory it is recommended
to write to a file.
:param bucket_name: The bucket to fetch from.
:param object_name: The object to fetch.
:param filename: If set, a local file path where the file should be written to.
:param chunk_size: Blob chunk size.
:param timeout: Request timeout in seconds.
:param num_max_attempts: Number of attempts to download the file.
:param user_project: The identifier of the Google Cloud project to bill for the request.
Required for Requester Pays buckets.
"""
# TODO: future improvement check file size before downloading,
# to check for local space availability
if num_max_attempts is None:
num_max_attempts = 3
for attempt in range(num_max_attempts):
if attempt:
# Wait with exponential backoff scheme before retrying.
timeout_seconds = 2**attempt
time.sleep(timeout_seconds)
try:
client = self.get_conn()
bucket = client.bucket(bucket_name, user_project=user_project)
blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)
if filename:
blob.download_to_filename(filename, timeout=timeout)
get_hook_lineage_collector().add_input_asset(
context=self, scheme="gs", asset_kwargs={"bucket": bucket.name, "key": blob.name}
)
get_hook_lineage_collector().add_output_asset(
context=self, scheme="file", asset_kwargs={"path": filename}
)
self.log.info("File downloaded to %s", filename)
return filename
get_hook_lineage_collector().add_input_asset(
context=self, scheme="gs", asset_kwargs={"bucket": bucket.name, "key": blob.name}
)
return blob.download_as_bytes()
except GoogleCloudError:
if attempt == num_max_attempts - 1:
self.log.error(
"Download attempt of object: %s from %s has failed. Attempt: %s, max %s.",
object_name,
bucket_name,
attempt,
num_max_attempts,
)
raise
raise NotImplementedError # should not reach this, but makes mypy happy
def download_as_byte_array(
    self,
    bucket_name: str,
    object_name: str,
    chunk_size: int | None = None,
    timeout: int | None = DEFAULT_TIMEOUT,
    num_max_attempts: int | None = 1,
) -> bytes:
    """
    Download an object from Google Cloud Storage and return its content as bytes.

    Thin convenience wrapper around :meth:`download` that deliberately omits the
    ``filename`` argument, so the object is always materialized in memory. For
    objects larger than the available memory prefer :meth:`download` with a local
    ``filename`` instead.

    :param bucket_name: The bucket to fetch from.
    :param object_name: The object to fetch.
    :param chunk_size: Blob chunk size.
    :param timeout: Request timeout in seconds.
    :param num_max_attempts: Number of attempts to download the file.
    :return: The raw object content.
    """
    # Without a filename, ``download`` always returns bytes, never a path.
    return self.download(
        bucket_name=bucket_name,
        object_name=object_name,
        chunk_size=chunk_size,
        timeout=timeout,
        num_max_attempts=num_max_attempts,
    )
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file(
    self,
    bucket_name: str = PROVIDE_BUCKET,
    object_name: str | None = None,
    object_url: str | None = None,
    dir: str | None = None,
    user_project: str | None = None,
) -> Generator[IO[bytes], None, None]:
    """
    Download the file to a temporary directory and returns a file handle.

    You can use this method by passing the bucket_name and object_name parameters
    or just object_url parameter.

    :param bucket_name: The bucket to fetch from.
    :param object_name: The object to fetch.
    :param object_url: File reference url. Must start with "gs: //"
    :param dir: The tmp sub directory to download the file to. (passed to NamedTemporaryFile)
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :return: File handler
    """
    # The decorator may have derived bucket_name/object_name from object_url;
    # by this point object_name must be resolved.
    if object_name is None:
        raise ValueError("Object name can not be empty")
    # Use the object's base name as the temp-file suffix so the handle's name
    # resembles the original object (helps suffix-sensitive consumers).
    _, _, file_name = object_name.rpartition("/")
    with NamedTemporaryFile(suffix=file_name, dir=dir) as tmp_file:
        self.download(
            bucket_name=bucket_name,
            object_name=object_name,
            filename=tmp_file.name,
            user_project=user_project,
        )
        tmp_file.flush()
        # Temp file is removed automatically when the context exits.
        yield tmp_file
@_fallback_object_url_to_object_name_and_bucket_name()
@contextmanager
def provide_file_and_upload(
    self,
    bucket_name: str = PROVIDE_BUCKET,
    object_name: str | None = None,
    object_url: str | None = None,
    user_project: str | None = None,
) -> Generator[IO[bytes], None, None]:
    """
    Create temporary file, returns a file handle and uploads the files content on close.

    You can use this method by passing the bucket_name and object_name parameters
    or just object_url parameter.

    :param bucket_name: The bucket to fetch from.
    :param object_name: The object to fetch.
    :param object_url: File reference url. Must start with "gs: //"
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :return: File handler
    """
    # The decorator may have derived bucket_name/object_name from object_url.
    if object_name is None:
        raise ValueError("Object name can not be empty")
    _, _, file_name = object_name.rpartition("/")
    with NamedTemporaryFile(suffix=file_name) as tmp_file:
        # Caller writes into the handle; the upload happens only after the
        # caller's block finishes (control returns past the yield).
        yield tmp_file
        tmp_file.flush()
        self.upload(
            bucket_name=bucket_name,
            object_name=object_name,
            filename=tmp_file.name,
            user_project=user_project,
        )
def upload(
    self,
    bucket_name: str,
    object_name: str,
    filename: str | None = None,
    data: str | bytes | None = None,
    mime_type: str | None = None,
    gzip: bool = False,
    encoding: str = "utf-8",
    chunk_size: int | None = None,
    timeout: int | None = DEFAULT_TIMEOUT,
    num_max_attempts: int = 1,
    metadata: dict | None = None,
    cache_control: str | None = None,
    user_project: str | None = None,
) -> None:
    """
    Upload a local file or file data as string or bytes to Google Cloud Storage.

    Exactly one of ``filename`` and ``data`` must be provided.

    :param bucket_name: The bucket to upload to.
    :param object_name: The object name to set when uploading the file.
    :param filename: The local file path to the file to be uploaded.
    :param data: The file's data as a string or bytes to be uploaded.
    :param mime_type: The file's mime type set when uploading the file.
    :param gzip: Option to compress local file or file data for upload
    :param encoding: bytes encoding for file data if provided as string
    :param chunk_size: Blob chunk size.
    :param timeout: Request timeout in seconds.
    :param num_max_attempts: Number of attempts to try to upload the file.
    :param metadata: The metadata to be uploaded with the file.
    :param cache_control: Cache-Control metadata field.
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :raises ValueError: If both or neither of ``filename`` and ``data`` are given.
    """

    def _call_with_retry(f: Callable[[], None]) -> None:
        """
        Upload a file or a string with a retry mechanism and exponential back-off.

        :param f: Callable that should be retried.
        """
        for attempt in range(1, 1 + num_max_attempts):
            try:
                f()
            except GoogleCloudError as e:
                if attempt == num_max_attempts:
                    # BUGFIX: the second placeholder previously logged
                    # object_name twice instead of the bucket name.
                    self.log.error(
                        "Upload attempt of object: %s from %s has failed. Attempt: %s, max %s.",
                        object_name,
                        bucket_name,
                        attempt,
                        num_max_attempts,
                    )
                    raise e
                # Wait with exponential backoff scheme before retrying.
                timeout_seconds = 2 ** (attempt - 1)
                time.sleep(timeout_seconds)
            else:
                # BUGFIX: exit after a successful upload; previously the loop
                # kept re-running f() for the remaining attempts even though
                # it had already succeeded (visible when num_max_attempts > 1).
                return

    client = self.get_conn()
    bucket = client.bucket(bucket_name, user_project=user_project)
    blob = bucket.blob(blob_name=object_name, chunk_size=chunk_size)

    if metadata:
        blob.metadata = metadata

    if cache_control:
        blob.cache_control = cache_control

    if filename is not None and data is not None:
        raise ValueError(
            "'filename' and 'data' parameter provided. Please "
            "specify a single parameter, either 'filename' for "
            "local file uploads or 'data' for file content uploads."
        )
    if filename is not None:
        if not mime_type:
            mime_type = "application/octet-stream"
        if gzip:
            # Compress to a sibling ".gz" file and upload that instead.
            filename_gz = filename + ".gz"

            with open(filename, "rb") as f_in, gz.open(filename_gz, "wb") as f_out:
                shutil.copyfileobj(f_in, f_out)
                filename = filename_gz

        _call_with_retry(
            partial(blob.upload_from_filename, filename=filename, content_type=mime_type, timeout=timeout)
        )
        get_hook_lineage_collector().add_input_asset(
            context=self, scheme="file", asset_kwargs={"path": filename}
        )

        if gzip:
            # Remove the temporary compressed copy after a successful upload.
            os.remove(filename)
        self.log.info("File %s uploaded to %s in %s bucket", filename, object_name, bucket_name)
    elif data is not None:
        if not mime_type:
            mime_type = "text/plain"
        if gzip:
            if isinstance(data, str):
                data = bytes(data, encoding)
            out = BytesIO()
            with gz.GzipFile(fileobj=out, mode="w") as f:
                f.write(data)
            data = out.getvalue()

        _call_with_retry(partial(blob.upload_from_string, data, content_type=mime_type, timeout=timeout))

        self.log.info("Data stream uploaded to %s in %s bucket", object_name, bucket_name)
    else:
        raise ValueError("'filename' and 'data' parameter missing. One is required to upload to gcs.")
    get_hook_lineage_collector().add_output_asset(
        context=self, scheme="gs", asset_kwargs={"bucket": bucket.name, "key": blob.name}
    )
def exists(
    self,
    bucket_name: str,
    object_name: str,
    retry: Retry = DEFAULT_RETRY,
    user_project: str | None = None,
) -> bool:
    """
    Check whether an object exists in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the blob_name to check in the Google cloud
        storage bucket.
    :param retry: (Optional) How to retry the RPC
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :return: ``True`` if the object exists, ``False`` otherwise.
    """
    blob = (
        self.get_conn()
        .bucket(bucket_name, user_project=user_project)
        .blob(blob_name=object_name)
    )
    return blob.exists(retry=retry)
def get_blob_update_time(self, bucket_name: str, object_name: str):
    """
    Return the last-update timestamp of an object in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the blob to get updated time from the Google cloud
        storage bucket.
    :raises ValueError: If the object does not exist (via :meth:`_get_blob`).
    """
    return self._get_blob(bucket_name, object_name).updated
def is_updated_after(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
    """
    Check whether an object was updated strictly after ``ts``.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param ts: The timestamp to check against; naive values are assumed UTC.
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    # GCS timestamps are timezone-aware; make the comparison value aware too.
    if not ts.tzinfo:
        ts = ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s > %s", blob_update_time, ts)
    return blob_update_time > ts
def is_updated_between(
    self, bucket_name: str, object_name: str, min_ts: datetime, max_ts: datetime
) -> bool:
    """
    Check whether an object's update time falls in ``[min_ts, max_ts)``.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param min_ts: The minimum timestamp to check against (inclusive).
    :param max_ts: The maximum timestamp to check against (exclusive).
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    # Normalize naive bounds to UTC so they compare against aware GCS times.
    if not min_ts.tzinfo:
        min_ts = min_ts.replace(tzinfo=timezone.utc)
    if not max_ts.tzinfo:
        max_ts = max_ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s is between %s and %s", blob_update_time, min_ts, max_ts)
    return min_ts <= blob_update_time < max_ts
def is_updated_before(self, bucket_name: str, object_name: str, ts: datetime) -> bool:
    """
    Check whether an object was last updated strictly before ``ts``.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param ts: The timestamp to check against; naive values are assumed UTC.
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    # Make the comparison value timezone-aware to match GCS timestamps.
    if not ts.tzinfo:
        ts = ts.replace(tzinfo=timezone.utc)
    self.log.info("Verify object date: %s < %s", blob_update_time, ts)
    return blob_update_time < ts
def is_older_than(self, bucket_name: str, object_name: str, seconds: int) -> bool:
    """
    Check whether an object's last update is older than ``seconds`` ago.

    :param bucket_name: The Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket.
    :param seconds: The time in seconds to check against
    """
    blob_update_time = self.get_blob_update_time(bucket_name, object_name)
    if blob_update_time is None:
        return False
    from datetime import timedelta

    cutoff = timezone.utcnow() - timedelta(seconds=seconds)
    self.log.info("Verify object date: %s is older than %s", blob_update_time, cutoff)
    return blob_update_time < cutoff
def delete(self, bucket_name: str, object_name: str) -> None:
    """
    Delete a single object from a Google Cloud Storage bucket.

    :param bucket_name: name of the bucket, where the object resides
    :param object_name: name of the object to delete
    """
    bucket = self.get_conn().bucket(bucket_name)
    blob = bucket.blob(blob_name=object_name)
    blob.delete()
    # Record the removed object in hook lineage.
    get_hook_lineage_collector().add_input_asset(
        context=self, scheme="gs", asset_kwargs={"bucket": bucket.name, "key": blob.name}
    )
    self.log.info("Blob %s deleted.", object_name)
def get_bucket(self, bucket_name: str) -> storage.Bucket:
    """
    Return a (lazy) bucket handle for ``bucket_name``.

    :param bucket_name: name of the bucket
    """
    client = self.get_conn()
    return client.bucket(bucket_name)
def delete_bucket(self, bucket_name: str, force: bool = False, user_project: str | None = None) -> None:
    """
    Delete a bucket from Google Cloud Storage.

    :param bucket_name: name of the bucket which will be deleted
    :param force: false not allow to delete non empty bucket, set force=True
        allows to delete non empty bucket
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    """
    bucket = self.get_conn().bucket(bucket_name, user_project=user_project)
    self.log.info("Deleting %s bucket", bucket_name)
    try:
        bucket.delete(force=force)
    except NotFound:
        # Deleting a missing bucket is treated as a no-op, not an error.
        self.log.info("Bucket %s not exists", bucket_name)
    else:
        self.log.info("Bucket %s has been deleted", bucket_name)
def list(
    self,
    bucket_name: str,
    versions: bool | None = None,
    max_results: int | None = None,
    prefix: str | List[str] | None = None,
    delimiter: str | None = None,
    match_glob: str | None = None,
    user_project: str | None = None,
):
    """
    List all objects from the bucket with the given a single prefix or multiple prefixes.

    :param bucket_name: bucket name
    :param versions: if true, list all versions of the objects
    :param max_results: max count of items to return in a single page of responses
    :param prefix: string or list of strings which filter objects whose name begin with it/them
    :param delimiter: (Deprecated) filters objects based on the delimiter (for e.g '.csv')
    :param match_glob: (Optional) filters objects based on the glob pattern given by the string
        (e.g, ``'**/*/.json'``).
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :return: a stream of object names matching the filtering criteria
    """
    if delimiter and delimiter != "/":
        warnings.warn(
            "Usage of 'delimiter' param is deprecated, please use 'match_glob' instead",
            AirflowProviderDeprecationWarning,
            stacklevel=2,
        )
    if match_glob and delimiter and delimiter != "/":
        raise AirflowException("'match_glob' param cannot be used with 'delimiter' that differs than '/'")

    # Normalize to a list of prefixes so a single code path handles both the
    # scalar (including None) and list forms.
    prefixes = prefix if isinstance(prefix, list) else [prefix]

    objects = []
    for prefix_item in prefixes:
        objects.extend(
            self._list(
                bucket_name=bucket_name,
                versions=versions,
                max_results=max_results,
                prefix=prefix_item,
                delimiter=delimiter,
                match_glob=match_glob,
                user_project=user_project,
            )
        )
    return objects
def _list(
    self,
    bucket_name: str,
    versions: bool | None = None,
    max_results: int | None = None,
    prefix: str | None = None,
    delimiter: str | None = None,
    match_glob: str | None = None,
    user_project: str | None = None,
) -> List:
    """
    List all objects from the bucket with the give string prefix in name.

    :param bucket_name: bucket name
    :param versions: if true, list all versions of the objects
    :param max_results: max count of items to return in a single page of responses
    :param prefix: string which filters objects whose name begin with it
    :param delimiter: (Deprecated) filters objects based on the delimiter (for e.g '.csv')
    :param match_glob: (Optional) filters objects based on the glob pattern given by the string
        (e.g, ``'**/*/.json'``).
    :param user_project: The identifier of the Google Cloud project to bill for the request.
        Required for Requester Pays buckets.
    :return: a stream of object names matching the filtering criteria
    """
    client = self.get_conn()
    bucket = client.bucket(bucket_name, user_project=user_project)

    ids = []
    page_token = None
    # Walk every result page; each iteration fetches one page and advances
    # page_token until the service returns no further token.
    while True:
        if match_glob:
            # matchGlob is not supported by the installed client version, so
            # fall back to the patched iterator below.
            blobs = self._list_blobs_with_match_glob(
                bucket=bucket,
                client=client,
                match_glob=match_glob,
                max_results=max_results,
                page_token=page_token,
                path=bucket.path + "/o",
                prefix=prefix,
                versions=versions,
            )
        else:
            blobs = bucket.list_blobs(
                max_results=max_results,
                page_token=page_token,
                prefix=prefix,
                delimiter=delimiter,
                versions=versions,
            )

        # Iterating the page consumes it and populates blobs.prefixes.
        blob_names = [blob.name for blob in blobs]

        # When a delimiter produced "directory" prefixes, return those instead
        # of the object names for this page (mirrors gsutil-style listing).
        if blobs.prefixes:
            ids.extend(blobs.prefixes)
        else:
            ids.extend(blob_names)

        page_token = blobs.next_page_token
        if page_token is None:
            # empty next page token
            break
    return ids
@staticmethod
def _list_blobs_with_match_glob(
    bucket,
    client,
    path: str,
    max_results: int | None = None,
    page_token: str | None = None,
    match_glob: str | None = None,
    prefix: str | None = None,
    versions: bool | None = None,
) -> Any:
    """
    List blobs when match_glob param is given.

    This method is a patched version of google.cloud.storage Client.list_blobs().
    It is used as a temporary workaround to support "match_glob" param,
    as it isn't officially supported by GCS Python client.
    (follow `issue #1035<https://github.com/googleapis/python-storage/issues/1035>`__).
    """
    from google.api_core import page_iterator
    from google.cloud.storage.bucket import _blobs_page_start, _item_to_blob

    # Query parameters forwarded verbatim to the JSON API "objects.list" call.
    extra_params: Any = {}
    if prefix is not None:
        extra_params["prefix"] = prefix
    if match_glob is not None:
        extra_params["matchGlob"] = match_glob
    if versions is not None:
        extra_params["versions"] = versions

    # NOTE(review): relies on the client's private _connection attribute —
    # intentional here, since this mirrors Client.list_blobs internals.
    api_request = functools.partial(
        client._connection.api_request, timeout=DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
    )

    blobs: Any = page_iterator.HTTPIterator(
        client=client,
        api_request=api_request,
        path=path,
        item_to_value=_item_to_blob,
        page_token=page_token,
        max_results=max_results,
        extra_params=extra_params,
        page_start=_blobs_page_start,
    )
    # _blobs_page_start expects these attributes on the iterator, exactly as
    # Client.list_blobs would set them.
    blobs.prefixes = set()
    blobs.bucket = bucket
    return blobs
def list_by_timespan(
    self,
    bucket_name: str,
    timespan_start: datetime,
    timespan_end: datetime,
    versions: bool | None = None,
    max_results: int | None = None,
    prefix: str | None = None,
    delimiter: str | None = None,
    match_glob: str | None = None,
) -> List[str]:
    """
    List all objects from the bucket with the given string prefix that were updated in the time range.

    :param bucket_name: bucket name
    :param timespan_start: will return objects that were updated at or after this datetime (UTC)
    :param timespan_end: will return objects that were updated before this datetime (UTC)
    :param versions: if true, list all versions of the objects
    :param max_results: max count of items to return in a single page of responses
    :param prefix: prefix string which filters objects whose name begin with
        this prefix
    :param delimiter: (Deprecated) filters objects based on the delimiter (for e.g '.csv')
    :param match_glob: (Optional) filters objects based on the glob pattern given by the string
        (e.g, ``'**/*/.json'``).
    :return: a stream of object names matching the filtering criteria
    """
    client = self.get_conn()
    bucket = client.bucket(bucket_name)

    ids = []
    page_token = None

    # Paginate through all results; the timespan filter is applied client-side
    # per page (the API has no updated-time filter).
    while True:
        if match_glob:
            blobs = self._list_blobs_with_match_glob(
                bucket=bucket,
                client=client,
                match_glob=match_glob,
                max_results=max_results,
                page_token=page_token,
                path=bucket.path + "/o",
                prefix=prefix,
                versions=versions,
            )
        else:
            blobs = bucket.list_blobs(
                max_results=max_results,
                page_token=page_token,
                prefix=prefix,
                delimiter=delimiter,
                versions=versions,
            )

        # Half-open interval [timespan_start, timespan_end); blob.updated is
        # coerced to UTC-aware before comparing.
        blob_names = [
            blob.name
            for blob in blobs
            if timespan_start <= blob.updated.replace(tzinfo=timezone.utc) < timespan_end
        ]

        # When a delimiter produced "directory" prefixes, those are returned
        # instead of the (time-filtered) object names for this page.
        if blobs.prefixes:
            ids.extend(blobs.prefixes)
        else:
            ids.extend(blob_names)

        page_token = blobs.next_page_token
        if page_token is None:
            # empty next page token
            break
    return ids
def _get_blob(self, bucket_name: str, object_name: str) -> Blob:
    """
    Fetch a blob (with server-side metadata) from Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the blob_name is.
    :param object_name: The name of the object to check in the Google
        cloud storage bucket_name.
    :raises ValueError: If the object does not exist in the bucket.
    """
    blob = self.get_conn().bucket(bucket_name).get_blob(blob_name=object_name)
    if blob is None:
        raise ValueError(f"Object ({object_name}) not found in Bucket ({bucket_name})")
    return blob
def get_size(self, bucket_name: str, object_name: str) -> int:
    """
    Return the size in bytes of an object in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the blob_name is.
    :param object_name: The name of the object to check in the Google
        cloud storage bucket_name.
    """
    self.log.info("Checking the file size of object: %s in bucket_name: %s", object_name, bucket_name)
    blob_size = self._get_blob(bucket_name, object_name).size
    self.log.info("The file size of %s is %s bytes.", object_name, blob_size)
    return blob_size
def get_crc32c(self, bucket_name: str, object_name: str):
    """
    Return the CRC32c checksum of an object in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the blob_name is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket_name.
    """
    self.log.info(
        "Retrieving the crc32c checksum of object_name: %s in bucket_name: %s",
        object_name,
        bucket_name,
    )
    blob_crc32c = self._get_blob(bucket_name, object_name).crc32c
    self.log.info("The crc32c checksum of %s is %s", object_name, blob_crc32c)
    return blob_crc32c
def get_md5hash(self, bucket_name: str, object_name: str) -> str:
    """
    Return the base64-encoded MD5 hash of an object in Google Cloud Storage.

    :param bucket_name: The Google Cloud Storage bucket where the blob_name is.
    :param object_name: The name of the object to check in the Google cloud
        storage bucket_name.
    """
    self.log.info("Retrieving the MD5 hash of object: %s in bucket: %s", object_name, bucket_name)
    blob_md5hash = self._get_blob(bucket_name, object_name).md5_hash
    self.log.info("The md5Hash of %s is %s", object_name, blob_md5hash)
    return blob_md5hash
def get_metadata(self, bucket_name: str, object_name: str) -> dict | None:
    """
    Return the custom metadata dict of an object in Google Cloud Storage.

    :param bucket_name: Name of the Google Cloud Storage bucket where the object is.
    :param object_name: The name of the object containing the desired metadata
    :return: The metadata associated with the object, or ``None``/empty if unset.
    """
    self.log.info("Retrieving the metadata dict of object (%s) in bucket (%s)", object_name, bucket_name)
    blob_metadata = self._get_blob(bucket_name, object_name).metadata
    if not blob_metadata:
        self.log.info("Metadata of object (%s) is empty or it does not exist", object_name)
    else:
        self.log.info("Retrieved metadata of object (%s) with %s fields", object_name, len(blob_metadata))
    return blob_metadata
@GoogleBaseHook.fallback_to_default_project_id
def create_bucket(
    self,
    bucket_name: str,
    resource: dict | None = None,
    storage_class: str = "MULTI_REGIONAL",
    location: str = "US",
    project_id: str = PROVIDE_PROJECT_ID,
    labels: dict | None = None,
) -> str:
    """
    Create a new bucket.

    Google Cloud Storage uses a flat namespace, so you can't
    create a bucket with a name that is already in use.

    .. seealso::
        For more information, see Bucket Naming Guidelines:
        https://cloud.google.com/storage/docs/bucketnaming.html#requirements

    :param bucket_name: The name of the bucket.
    :param resource: An optional dict with parameters for creating the bucket.
        For information on available parameters, see Cloud Storage API doc:
        https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
    :param storage_class: This defines how objects in the bucket are stored
        and determines the SLA and the cost of storage. Values include

        - ``MULTI_REGIONAL``
        - ``REGIONAL``
        - ``STANDARD``
        - ``NEARLINE``
        - ``COLDLINE``.

        If this value is not specified when the bucket is
        created, it will default to STANDARD.
    :param location: The location of the bucket.
        Object data for objects in the bucket resides in physical storage
        within this region. Defaults to US.

        .. seealso:: https://developers.google.com/storage/docs/bucket-locations

    :param project_id: The ID of the Google Cloud Project.
    :param labels: User-provided labels, in key/value pairs.
    :return: If successful, it returns the ``id`` of the bucket.
    """
    self.log.info(
        "Creating Bucket: %s; Location: %s; Storage Class: %s", bucket_name, location, storage_class
    )

    # Add airflow-version label to the bucket
    labels = labels or {}
    # Label values may not contain "." or "+", so sanitize the version string.
    labels["airflow-version"] = "v" + version.replace(".", "-").replace("+", "-")

    client = self.get_conn()
    bucket = client.bucket(bucket_name=bucket_name)
    bucket_resource = resource or {}

    # Apply every resource field except "name" onto the local bucket object
    # before the create call. NOTE(review): _patch_property is a private
    # google-cloud-storage API — mirrors how the client sets resource fields.
    for item in bucket_resource:
        if item != "name":
            bucket._patch_property(name=item, value=resource[item])  # type: ignore[index]

    bucket.storage_class = storage_class
    bucket.labels = labels
    bucket.create(project=project_id, location=location)
    return bucket.id
def insert_bucket_acl(
    self, bucket_name: str, entity: str, role: str, user_project: str | None = None
) -> None:
    """
    Create a new ACL entry on the specified bucket_name.

    See: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/insert

    :param bucket_name: Name of a bucket_name.
    :param entity: The entity holding the permission, in one of the following forms:
        user-userId, user-email, group-groupId, group-email, domain-domain,
        project-team-projectId, allUsers, allAuthenticatedUsers.
        See: https://cloud.google.com/storage/docs/access-control/lists#scopes
    :param role: The access permission for the entity.
        Acceptable values are: "OWNER", "READER", "WRITER".
    :param user_project: (Optional) The project to be billed for this request.
        Required for Requester Pays buckets.
    """
    self.log.info("Creating a new ACL entry in bucket: %s", bucket_name)
    bucket = self.get_conn().bucket(bucket_name=bucket_name)
    acl = bucket.acl
    # Reload fetches the current ACL, then the new entry is merged in and saved.
    acl.reload()
    acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
    if user_project:
        acl.user_project = user_project
    acl.save()
    self.log.info("A new ACL entry created in bucket: %s", bucket_name)
def insert_object_acl(
    self,
    bucket_name: str,
    object_name: str,
    entity: str,
    role: str,
    generation: int | None = None,
    user_project: str | None = None,
) -> None:
    """
    Create a new ACL entry on the specified object.

    See: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert

    :param bucket_name: Name of a bucket_name.
    :param object_name: Name of the object. For information about how to URL encode
        object names to be path safe, see:
        https://cloud.google.com/storage/docs/json_api/#encoding
    :param entity: The entity holding the permission, in one of the following forms:
        user-userId, user-email, group-groupId, group-email, domain-domain,
        project-team-projectId, allUsers, allAuthenticatedUsers
        See: https://cloud.google.com/storage/docs/access-control/lists#scopes
    :param role: The access permission for the entity.
        Acceptable values are: "OWNER", "READER".
    :param generation: Optional. If present, selects a specific revision of this object.
    :param user_project: (Optional) The project to be billed for this request.
        Required for Requester Pays buckets.
    """
    self.log.info("Creating a new ACL entry for object: %s in bucket: %s", object_name, bucket_name)
    bucket = self.get_conn().bucket(bucket_name=bucket_name)
    blob = bucket.blob(blob_name=object_name, generation=generation)
    acl = blob.acl
    # Reload fetches the current ACL from Cloud Storage.
    acl.reload()
    acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
    if user_project:
        acl.user_project = user_project
    acl.save()
    self.log.info("A new ACL entry created for object: %s in bucket: %s", object_name, bucket_name)
def compose(self, bucket_name: str, source_objects: List[str], destination_object: str) -> None:
    """
    Composes a list of existing object into a new object in the same storage bucket_name.

    Currently it only supports up to 32 objects that can be concatenated
    in a single operation

    https://cloud.google.com/storage/docs/json_api/v1/objects/compose

    :param bucket_name: The name of the bucket containing the source objects.
        This is also the same bucket to store the composed destination object.
    :param source_objects: The list of source objects that will be composed
        into a single object.
    :param destination_object: The path of the object if given.
    :raises ValueError: On empty ``source_objects``, ``bucket_name`` or
        ``destination_object``.
    """
    if not source_objects:
        raise ValueError("source_objects cannot be empty.")
    if not bucket_name or not destination_object:
        raise ValueError("bucket_name and destination_object cannot be empty.")

    self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name)

    bucket = self.get_conn().bucket(bucket_name)
    destination_blob = bucket.blob(destination_object)
    source_blobs = [bucket.blob(blob_name=name) for name in source_objects]
    destination_blob.compose(sources=source_blobs)

    # Lineage: one output (the composed object) and one input per source.
    get_hook_lineage_collector().add_output_asset(
        context=self, scheme="gs", asset_kwargs={"bucket": bucket.name, "key": destination_blob.name}
    )
    for source_blob in source_blobs:
        get_hook_lineage_collector().add_input_asset(
            context=self,
            scheme="gs",
            asset_kwargs={"bucket": bucket.name, "key": source_blob.name},
        )

    self.log.info("Completed successfully.")
def _sync_to_local_dir_delete_stale_local_files(self, current_gcs_objects: List[Path], local_dir: Path):
    """
    Remove local files not present in ``current_gcs_objects``, then prune empty dirs.

    :param current_gcs_objects: Local paths that correspond to objects still in GCS.
    :param local_dir: Root of the local mirror directory.
    """
    expected_paths = {path.resolve() for path in current_gcs_objects}
    for candidate in local_dir.rglob("*"):
        if candidate.is_file() and candidate.resolve() not in expected_paths:
            self.log.debug("Deleting stale local file: %s", candidate)
            candidate.unlink()

    # Remove directories left empty, walking deepest-first so parents emptied
    # by child removal are also cleaned up.
    for root, dirs, _ in os.walk(local_dir, topdown=False):
        for name in dirs:
            dir_path = os.path.join(root, name)
            if not os.listdir(dir_path):
                self.log.debug("Deleting stale empty directory: %s", dir_path)
                os.rmdir(dir_path)
def _sync_to_local_dir_if_changed(self, blob: Blob, local_target_path: Path):
    """
    Download ``blob`` to ``local_target_path`` only if the local copy looks stale.

    A download is triggered when the local file is missing, its size differs
    from the GCS object's, or the GCS object's ``updated`` time is newer than
    the local file's mtime.

    :param blob: The GCS blob to mirror locally.
    :param local_target_path: Destination path for the local copy.
    """
    should_download = False
    download_msg = ""
    if not local_target_path.exists():
        should_download = True
        download_msg = f"Local file {local_target_path} does not exist."
    else:
        local_stats = local_target_path.stat()
        # Reload blob to get fresh metadata, including size and updated time
        blob.reload()
        if blob.size != local_stats.st_size:
            should_download = True
            download_msg = (
                f"GCS object size ({blob.size}) and local file size ({local_stats.st_size}) differ."
            )
        gcs_last_modified = blob.updated
        # mtime check is a heuristic: local clock skew can cause spurious
        # re-downloads, but never a missed update when sizes also match.
        if (
            not should_download
            and gcs_last_modified
            and local_stats.st_mtime < gcs_last_modified.timestamp()
        ):
            should_download = True
            download_msg = f"GCS object last modified ({gcs_last_modified}) is newer than local file last modified ({datetime.fromtimestamp(local_stats.st_mtime, tz=timezone.utc)})."

    if should_download:
        self.log.debug("%s Downloading %s to %s", download_msg, blob.name, local_target_path.as_posix())
        self.download(
            bucket_name=blob.bucket.name, object_name=blob.name, filename=str(local_target_path)
        )
    else:
        self.log.debug(
            "Local file %s is up-to-date with GCS object %s. Skipping download.",
            local_target_path.as_posix(),
            blob.name,
        )
def sync_to_local_dir(
    self,
    bucket_name: str,
    local_dir: str | Path,
    prefix: str | None = None,
    delete_stale: bool = False,
) -> None:
    """
    Download files from a GCS bucket to a local directory.

    It will download all files from the given ``prefix`` and create the corresponding
    directory structure in the ``local_dir``.
    If ``delete_stale`` is ``True``, it will delete all local files that do not exist in the GCS bucket.

    :param bucket_name: The name of the GCS bucket.
    :param local_dir: The local directory to which the files will be downloaded.
    :param prefix: The prefix of the files to be downloaded.
    :param delete_stale: If ``True``, deletes local files that don't exist in the bucket.
    """
    prefix = prefix or ""
    local_dir_path = Path(local_dir)
    self.log.debug("Downloading data from gs://%s/%s to %s", bucket_name, prefix, local_dir_path)

    gcs_bucket = self.get_bucket(bucket_name)
    # Track every local path that corresponds to a live GCS object so the
    # optional stale-file cleanup below knows what to keep.
    local_gcs_objects = []
    for blob in gcs_bucket.list_blobs(prefix=prefix):
        # GCS lists "directories" as objects ending with a slash. We should skip them.
        if blob.name.endswith("/"):
            continue
        blob_path = Path(blob.name)
        # Mirror the object's path relative to the prefix under local_dir.
        local_target_path = local_dir_path.joinpath(blob_path.relative_to(prefix))
        if not local_target_path.parent.exists():
            local_target_path.parent.mkdir(parents=True, exist_ok=True)
            self.log.debug("Created local directory: %s", local_target_path.parent)
        # Only downloads when size/mtime indicate the local copy is stale.
        self._sync_to_local_dir_if_changed(blob=blob, local_target_path=local_target_path)
        local_gcs_objects.append(local_target_path)

    if delete_stale:
        self._sync_to_local_dir_delete_stale_local_files(
            current_gcs_objects=local_gcs_objects, local_dir=local_dir_path
        )
def sync(
self,
source_bucket: str,
destination_bucket: str,
source_object: str | None = None,
destination_object: str | None = None,
recursive: bool = True,
allow_overwrite: bool = False,
delete_extra_files: bool = False,
) -> None:
"""
Synchronize the contents of the buckets.
Parameters ``source_object`` and ``destination_object`` describe the root sync directories. If they
are not passed, the entire bucket will be synchronized. If they are passed, they should point
to directories.
.. note::
The synchronization of individual files is not supported. Only entire directories can be
synchronized.
:param source_bucket: The name of the bucket containing the source objects.
:param destination_bucket: The name of the bucket containing the destination objects.
:param source_object: The root sync directory in the source bucket.
:param destination_object: The root sync directory in the destination bucket.
:param recursive: If True, subdirectories will be considered
:param allow_overwrite: if True, the files will be overwritten if a mismatched file is found.
By default, overwriting files is not allowed
:param delete_extra_files: if True, deletes additional files from the source that not found in the
destination. By default extra files are not deleted.
.. note::
This option can delete data quickly if you specify the wrong source/destination combination.
:return: none
"""
client = self.get_conn()
# Create bucket object
source_bucket_obj = client.bucket(source_bucket)
destination_bucket_obj = client.bucket(destination_bucket)
# Normalize parameters when they are passed
source_object = normalize_directory_path(source_object)
destination_object = normalize_directory_path(destination_object)
# Calculate the number of characters that remove from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
# Prepare synchronization plan
to_copy_blobs, to_delete_blobs, to_rewrite_blobs = self._prepare_sync_plan(
source_bucket=source_bucket_obj,
destination_bucket=destination_bucket_obj,
source_object=source_object,
destination_object=destination_object,
recursive=recursive,
)
self.log.info(
"Planned synchronization. To delete blobs count: %s, to upload blobs count: %s, "
"to rewrite blobs count: %s",
len(to_delete_blobs),
len(to_copy_blobs),
len(to_rewrite_blobs),
)
# Copy missing object to new bucket
if not to_copy_blobs:
self.log.info("Skipped blobs copying.")
else:
for blob in to_copy_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs copied.")
# Delete redundant files
if not to_delete_blobs:
self.log.info("Skipped blobs deleting.")
elif delete_extra_files:
# TODO: Add batch. I tried to do it, but the Google library is not stable at the moment.
for blob in to_delete_blobs:
self.delete(blob.bucket.name, blob.name)
self.log.info("Blobs deleted.")
# Overwrite files that are different
if not to_rewrite_blobs:
self.log.info("Skipped blobs overwriting.")
elif allow_overwrite:
for blob in to_rewrite_blobs:
dst_object = self._calculate_sync_destination_path(
blob, destination_object, source_object_prefix_len
)
self.rewrite(
source_bucket=source_bucket_obj.name,
source_object=blob.name,
destination_bucket=destination_bucket_obj.name,
destination_object=dst_object,
)
self.log.info("Blobs rewritten.")
self.log.info("Synchronization finished.")
def _calculate_sync_destination_path(
self, blob: storage.Blob, destination_object: str | None, source_object_prefix_len: int
) -> str:
return (
os.path.join(destination_object, blob.name[source_object_prefix_len:])
if destination_object
else blob.name[source_object_prefix_len:]
)
@staticmethod
def _prepare_sync_plan(
source_bucket: storage.Bucket,
destination_bucket: storage.Bucket,
source_object: str | None,
destination_object: str | None,
recursive: bool,
) -> tuple[set[storage.Blob], set[storage.Blob], set[storage.Blob]]:
# Calculate the number of characters that are removed from the name, because they contain information
# about the parent's path
source_object_prefix_len = len(source_object) if source_object else 0
destination_object_prefix_len = len(destination_object) if destination_object else 0
delimiter = "/" if not recursive else None
# Fetch blobs list
source_blobs = list(source_bucket.list_blobs(prefix=source_object, delimiter=delimiter))
destination_blobs = list(
destination_bucket.list_blobs(prefix=destination_object, delimiter=delimiter)
)
# Create indexes that allow you to identify blobs based on their name
source_names_index = {a.name[source_object_prefix_len:]: a for a in source_blobs}
destination_names_index = {a.name[destination_object_prefix_len:]: a for a in destination_blobs}
# Create sets with names without parent object name
source_names = set(source_names_index.keys())
# Discards empty string from source set that creates an empty subdirectory in
# destination bucket with source subdirectory name
source_names.discard("")
destination_names = set(destination_names_index.keys())
# Determine objects to copy and delete
to_copy = source_names - destination_names
to_delete = destination_names - source_names
to_copy_blobs: set[storage.Blob] = {source_names_index[a] for a in to_copy}
to_delete_blobs: set[storage.Blob] = {destination_names_index[a] for a in to_delete}
# Find names that are in both buckets
names_to_check = source_names.intersection(destination_names)
to_rewrite_blobs: set[storage.Blob] = set()
# Compare objects based on crc32
for current_name in names_to_check:
source_blob = source_names_index[current_name]
destination_blob = destination_names_index[current_name]
# If either object is CMEK-protected, use the Cloud Storage Objects Get API to retrieve them
# so that the crc32c is included
if source_blob.kms_key_name:
source_blob = source_bucket.get_blob(source_blob.name, generation=source_blob.generation)
if destination_blob.kms_key_name:
destination_blob = destination_bucket.get_blob(
destination_blob.name, generation=destination_blob.generation
)
# if the objects are different, save it
if source_blob.crc32c != destination_blob.crc32c:
to_rewrite_blobs.add(source_blob)
return to_copy_blobs, to_delete_blobs, to_rewrite_blobs
def gcs_object_is_directory(bucket: str) -> bool:
"""Return True if given Google Cloud Storage URL (gs://<bucket>/<blob>) is a directory or empty bucket."""
_, blob = _parse_gcs_url(bucket)
return len(blob) == 0 or blob.endswith("/")
def parse_json_from_gcs(
gcp_conn_id: str,
file_uri: str,
impersonation_chain: str | Sequence[str] | None = None,
) -> Any:
"""
Download and parses json file from Google cloud Storage.
:param gcp_conn_id: Airflow Google Cloud connection ID.
:param file_uri: full path to json file
example: ``gs://test-bucket/dir1/dir2/file``
"""
gcs_hook = GCSHook(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
bucket, blob = _parse_gcs_url(file_uri)
with NamedTemporaryFile(mode="w+b") as file:
try:
gcs_hook.download(bucket_name=bucket, object_name=blob, filename=file.name)
except GoogleAPICallError as ex:
raise AirflowException(f"Failed to download file with query result: {ex}")
file.seek(0)
try:
json_data = file.read()
except (ValueError, OSError, RuntimeError) as ex:
raise AirflowException(f"Failed to read file: {ex}")
try:
result = json.loads(json_data)
except json.JSONDecodeError as ex:
raise AirflowException(f"Failed to decode query result from bytes to json: {ex}")
return result
def _parse_gcs_url(gsurl: str) -> tuple[str, str]:
"""
Given a Google Cloud Storage URL, return a tuple containing the corresponding bucket and blob.
Expected url format: gs://<bucket>/<blob>
"""
parsed_url = urlsplit(gsurl)
if not parsed_url.netloc:
raise AirflowException("Please provide a bucket name")
if parsed_url.scheme.lower() != "gs":
raise AirflowException(f"Schema must be to 'gs://': Current schema: '{parsed_url.scheme}://'")
bucket = parsed_url.netloc
# Remove leading '/' but NOT trailing one
blob = parsed_url.path.lstrip("/")
return bucket, blob
| GCSHook |
python | huggingface__transformers | src/transformers/models/afmoe/modeling_afmoe.py | {
"start": 27524,
"end": 30565
} | class ____(AfmoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = AfmoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, AfmoeForCausalLM
>>> model = AfmoeForCausalLM.from_pretrained("meta-afmoe/Afmoe-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-afmoe/Afmoe-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["AfmoeForCausalLM", "AfmoeModel", "AfmoePreTrainedModel"]
| AfmoeForCausalLM |
python | huggingface__transformers | src/transformers/models/glm4v/modular_glm4v.py | {
"start": 16764,
"end": 17302
} | class ____(Qwen2_5_VisionPatchEmbed):
def __init__(self, config: Glm4vVisionConfig) -> None:
nn.Module.__init__(self)
self.patch_size = config.patch_size
self.temporal_patch_size = config.temporal_patch_size
self.in_channels = config.in_channels
self.embed_dim = config.hidden_size
kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size)
| Glm4vVisionPatchEmbed |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/InteractiveParameter.py | {
"start": 239,
"end": 2086
} | class ____:
"""Just for testing purposes"""
value = None
def printResult(func):
@wraps(func)
def wrapper(*args, **kwargs):
LAST_RESULT.value = func(*args, **kwargs)
QtWidgets.QMessageBox.information(
QtWidgets.QApplication.activeWindow(),
"Function Run!",
f"Func result: {LAST_RESULT.value}",
)
return wrapper
host = Parameter.create(name="Interactive Parameter Use", type="group")
interactor = Interactor(parent=host, runOptions=RunOptions.ON_CHANGED)
@interactor.decorate()
@printResult
def easySample(a=5, b=6):
return a + b
@interactor.decorate()
@printResult
def stringParams(a="5", b="6"):
return a + b
@interactor.decorate(a=10)
@printResult
def requiredParam(a, b=10):
return a + b
@interactor.decorate(ignores=["a"])
@printResult
def ignoredAParam(a=10, b=20):
return a * b
@interactor.decorate(runOptions=RunOptions.ON_ACTION)
@printResult
def runOnButton(a=10, b=20):
return a + b
x = 5
@printResult
def accessVarInDifferentScope(x, y=10):
return x + y
func_interactive = InteractiveFunction(
accessVarInDifferentScope, closures={"x": lambda: x}
)
# Value is redeclared, but still bound
x = 10
interactor(func_interactive)
with interactor.optsContext(titleFormat=str.upper):
@interactor.decorate()
@printResult
def capslocknames(a=5):
return a
@interactor.decorate(
runOptions=(RunOptions.ON_CHANGED, RunOptions.ON_ACTION),
a={"type": "list", "limits": [5, 10, 20]},
)
@printResult
def runOnBtnOrChange_listOpts(a=5):
return a
@interactor.decorate(nest=False)
@printResult
def onlyTheArgumentsAppear(thisIsAFunctionArg=True):
return thisIsAFunctionArg
tree = ParameterTree()
tree.setParameters(host)
tree.show()
if __name__ == "__main__":
pg.exec()
| LAST_RESULT |
python | kamyu104__LeetCode-Solutions | Python/count-submatrices-with-equal-frequency-of-x-and-y.py | {
"start": 727,
"end": 1650
} | class ____(object):
def numberOfSubmatrices(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
result = 0
dp1 = [[0]*len(grid[0]) for _ in xrange(len(grid))]
dp2 = [[0]*len(grid[0]) for _ in xrange(len(grid))]
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if i-1 >= 0:
dp1[i][j] += dp1[i-1][j]
dp2[i][j] += dp2[i-1][j]
if j-1 >= 0:
dp1[i][j] += dp1[i][j-1]
dp2[i][j] += dp2[i][j-1]
if i-1 >= 0 and j-1 >= 0:
dp1[i][j] -= dp1[i-1][j-1]
dp2[i][j] -= dp2[i-1][j-1]
dp1[i][j] += int(grid[i][j] == 'X')
dp2[i][j] += int(grid[i][j] == 'Y')
result += int(dp1[i][j] == dp2[i][j] != 0)
return result
| Solution2 |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py | {
"start": 1067,
"end": 2453
} | class ____(BaseChecker):
"""Checks string formatting operations to ensure that the format string
is valid and the arguments match the format string.
"""
name = 'string'
msgs = MSGS
@check_messages(*(MSGS.keys()))
def visit_call(self, node):
"""Visit a call node."""
func = utils.safe_infer(node.func)
if (isinstance(func, astroid.bases.BoundMethod)
and isinstance(func.bound, astroid.bases.Instance)
and func.bound.name in ('str', 'unicode', 'bytes')):
if func.name == 'format':
self._check_new_format(node, func)
def _check_new_format(self, node, func):
""" Check the new string formatting """
if (isinstance(node.func, astroid.nodes.Attribute)
and not isinstance(node.func.expr, astroid.nodes.Const)):
return
try:
strnode = next(func.bound.infer())
except astroid.exceptions.InferenceError:
return
if not isinstance(strnode, astroid.nodes.Const):
return
if isinstance(strnode.value, bytes):
self.add_message('ansible-no-format-on-bytestring', node=node)
return
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleStringFormatChecker(linter))
| AnsibleStringFormatChecker |
python | huggingface__transformers | src/transformers/models/siglip2/modeling_siglip2.py | {
"start": 15545,
"end": 18315
} | class ____(PreTrainedModel):
config: Siglip2Config
base_model_prefix = "siglip2"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = [
"Siglip2TextEmbeddings",
"Siglip2VisionEmbeddings",
"Siglip2EncoderLayer",
"Siglip2MultiheadAttentionPoolingHead",
]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Siglip2EncoderLayer,
"attentions": Siglip2Attention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Siglip2VisionEmbeddings):
width = (
self.config.vision_config.hidden_size
if isinstance(self.config, Siglip2Config)
else self.config.hidden_size
)
init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
elif isinstance(module, nn.Embedding):
default_flax_embed_init(module.weight)
elif isinstance(module, Siglip2Attention):
init.xavier_uniform_(module.q_proj.weight)
init.xavier_uniform_(module.k_proj.weight)
init.xavier_uniform_(module.v_proj.weight)
init.xavier_uniform_(module.out_proj.weight)
init.zeros_(module.q_proj.bias)
init.zeros_(module.k_proj.bias)
init.zeros_(module.v_proj.bias)
init.zeros_(module.out_proj.bias)
elif isinstance(module, Siglip2MLP):
init.xavier_uniform_(module.fc1.weight)
init.xavier_uniform_(module.fc2.weight)
init.normal_(module.fc1.bias, std=1e-6)
init.normal_(module.fc2.bias, std=1e-6)
elif isinstance(module, Siglip2MultiheadAttentionPoolingHead):
init.xavier_uniform_(module.probe)
init.xavier_uniform_(module.attention.in_proj_weight)
init.zeros_(module.attention.in_proj_bias)
elif isinstance(module, Siglip2Model):
init.zeros_(module.logit_scale)
init.zeros_(module.logit_bias)
elif isinstance(module, Siglip2ForImageClassification):
init.normal_(
module.classifier.weight,
std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
)
elif isinstance(module, (nn.Linear, nn.Conv2d)):
lecun_normal_(module.weight)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
| Siglip2PreTrainedModel |
python | django__django | tests/composite_pk/models/tenant.py | {
"start": 44,
"end": 141
} | class ____(models.Model):
name = models.CharField(max_length=10, default="", blank=True)
| Tenant |
python | django__django | tests/one_to_one/models.py | {
"start": 1983,
"end": 2089
} | class ____(models.Model):
other = models.OneToOneField(Target, models.CASCADE, primary_key=True)
| Pointer |
python | sqlalchemy__sqlalchemy | test/orm/test_events.py | {
"start": 72031,
"end": 73213
} | class ____(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
cls.mapper_registry.map_imperatively(User, users)
def _fixture(self):
User = self.classes.User
canary = []
def load(target, ctx):
canary.append("load")
def refresh(target, ctx, attrs):
canary.append(("refresh", attrs))
event.listen(User, "load", load)
event.listen(User, "refresh", refresh)
return canary
def test_just_loaded(self):
User = self.classes.User
canary = self._fixture()
sess = fixture_session()
u1 = User(name="u1")
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).first()
eq_(canary, ["load"])
def test_repeated_rows(self):
User = self.classes.User
canary = self._fixture()
sess = fixture_session()
u1 = User(name="u1")
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).union_all(sess.query(User)).all()
eq_(canary, ["load"])
| LoadTest |
python | kamyu104__LeetCode-Solutions | Python/threshold-majority-queries.py | {
"start": 120,
"end": 2668
} | class ____(object):
def subarrayMajority(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
# reference: https://cp-algorithms.com/data_structures/sqrt_decomposition.html
def mo_s_algorithm(): # Time: O(QlogQ + (N + Q) * sqrt(N) + Q * N)
def add(i): # Time: O(F) = O(1)
idx = num_to_idx[nums[i]]
if cnt[idx]:
cnt2[cnt[idx]] -= 1
cnt[idx] += 1
cnt2[cnt[idx]] += 1
max_freq[0] = max(max_freq[0], cnt[idx])
def remove(i): # Time: O(F) = O(1)
idx = num_to_idx[nums[i]]
cnt2[cnt[idx]] -= 1
if not cnt2[max_freq[0]]:
max_freq[0] -= 1
cnt[idx] -= 1
if cnt[idx]:
cnt2[cnt[idx]] += 1
def get_ans(t): # Time: O(A) = O(N)
if max_freq[0] < t:
return -1
i = next(i for i in xrange(len(cnt)) if cnt[i] == max_freq[0])
return sorted_nums[i]
cnt = [0]*len(num_to_idx)
cnt2 = [0]*(len(nums)+1)
max_freq = [0]
result = [-1]*len(queries)
block_size = int(len(nums)**0.5)+1 # O(S) = O(sqrt(N))
idxs = range(len(queries))
idxs.sort(key=lambda x: (queries[x][0]//block_size, queries[x][1] if (queries[x][0]//block_size)&1 else -queries[x][1])) # Time: O(QlogQ)
left, right = 0, -1
for i in idxs: # Time: O((N / S) * N * F + S * Q * F + Q * A) = O((N + Q) * sqrt(N) + Q * N), O(S) = O(sqrt(N)), O(F) = O(logN), O(A) = O(1)
l, r, t = queries[i]
while left > l:
left -= 1
add(left)
while right < r:
right += 1
add(right)
while left < l:
remove(left)
left += 1
while right > r:
remove(right)
right -= 1
result[i] = get_ans(t)
return result
sorted_nums = sorted(set(nums))
num_to_idx = {x:i for i, x in enumerate(sorted_nums)}
return mo_s_algorithm()
# Time: O(nlogn + qlogq + (n + q) * sqrt(n) * logn)
# Space: O(n + q)
from sortedcontainers import SortedList
# sort, coordinate compression, mo's algorithm, sorted list
| Solution |
python | celery__celery | celery/exceptions.py | {
"start": 7431,
"end": 7529
} | class ____(TaskError):
"""The task has been revoked, so no result available."""
| TaskRevokedError |
python | huggingface__transformers | src/transformers/models/t5gemma/modular_t5gemma.py | {
"start": 15725,
"end": 19653
} | class ____(Gemma2Attention):
def __init__(self, config: T5GemmaModuleConfig, layer_idx: int):
super().__init__(config, layer_idx)
del self.sliding_window
del self.layer_type
self.is_causal = False
if config.cross_attention_hidden_size is None:
raise ValueError("Cross-attention needs cross_attention_hidden_size to be specified.")
self.k_proj = nn.Linear(
config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
encoder_hidden_states: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if encoder_hidden_states is None:
raise ValueError("Encoder hidden state is required for cross attention.")
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
if past_key_values is not None:
is_updated = past_key_values.is_updated.get(self.layer_idx)
curr_past_key_values = past_key_values.cross_attention_cache
if past_key_values is None or not is_updated:
encoder_input_shape = encoder_hidden_states.shape[:-1]
encoder_hidden_shape = (*encoder_input_shape, -1, self.head_dim)
key_states = self.k_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
value_states = self.v_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
if past_key_values is not None:
key_states, value_states = curr_past_key_values.update(key_states, value_states, self.layer_idx)
past_key_values.is_updated[self.layer_idx] = True
else:
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=None,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
def bidirectional_mask_function(attention_mask: Optional[torch.Tensor]) -> Callable:
"""
This creates bidirectional attention mask.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
if attention_mask is None:
return torch.ones((), dtype=torch.bool)
return attention_mask[batch_idx, kv_idx].to(torch.bool)
return inner_mask
def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable:
"""
This creates bidirectional attention mask with sliding window.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
return (q_idx - sliding_window < kv_idx) & (kv_idx < q_idx + sliding_window)
return inner_mask
| T5GemmaCrossAttention |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scanner.py | {
"start": 82103,
"end": 89273
} | class ____(Scanner): # RoundTripScanner Split Comments
def __init__(self, *arg, **kw):
# type: (Any, Any) -> None
super().__init__(*arg, **kw)
assert self.loader is not None
# comments isinitialised on .need_more_tokens and persist on
# self.loader.parsed_comments
self.comments = None
def get_token(self):
# type: () -> Any
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if len(self.tokens) > 0:
if isinstance(self.tokens[0], BlockEndToken):
self.comments.assign_post(self.tokens[0]) # type: ignore
else:
self.comments.assign_pre(self.tokens[0]) # type: ignore
self.tokens_taken += 1
return self.tokens.pop(0)
def need_more_tokens(self):
# type: () -> bool
if self.comments is None:
self.loader.parsed_comments = self.comments = ScannedComments() # type: ignore
if self.done:
return False
if len(self.tokens) == 0:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
if len(self.tokens) < 2:
return True
if self.tokens[0].start_mark.line == self.tokens[-1].start_mark.line:
return True
if True:
xprintf('-x--', len(self.tokens))
for t in self.tokens:
xprintf(t)
# xprintf(self.comments.last())
xprintf(self.comments.str_unprocessed()) # type: ignore
self.comments.assign_pre(self.tokens[0]) # type: ignore
self.comments.assign_eol(self.tokens) # type: ignore
return False
def scan_to_next_token(self):
# type: () -> None
srp = self.reader.peek
srf = self.reader.forward
if self.reader.index == 0 and srp() == '\uFEFF':
srf()
start_mark = self.reader.get_mark()
# xprintf('current_mark', start_mark.line, start_mark.column)
found = False
while not found:
while srp() == ' ':
srf()
ch = srp()
if ch == '#':
comment_start_mark = self.reader.get_mark()
comment = ch
srf() # skipt the '#'
while ch not in _THE_END:
ch = srp()
if ch == '\0': # don't gobble the end-of-stream character
# but add an explicit newline as "YAML processors should terminate
# the stream with an explicit line break
# https://yaml.org/spec/1.2/spec.html#id2780069
comment += '\n'
break
comment += ch
srf()
# we have a comment
if start_mark.column == 0:
self.comments.add_full_line_comment( # type: ignore
comment, comment_start_mark.column, comment_start_mark.line
)
else:
self.comments.add_eol_comment( # type: ignore
comment, comment_start_mark.column, comment_start_mark.line
)
comment = ""
# gather any blank lines or full line comments following the comment as well
self.scan_empty_or_full_line_comments()
if not self.flow_level:
self.allow_simple_key = True
return
if bool(self.scan_line_break()):
# start_mark = self.reader.get_mark()
if not self.flow_level:
self.allow_simple_key = True
self.scan_empty_or_full_line_comments()
return None
ch = srp()
if ch == '\n': # empty toplevel lines
start_mark = self.reader.get_mark()
comment = ""
while ch:
ch = self.scan_line_break(empty_line=True)
comment += ch
if srp() == '#':
# empty line followed by indented real comment
comment = comment.rsplit('\n', 1)[0] + '\n'
_ = self.reader.get_mark() # gobble end_mark
return None
else:
found = True
return None
def scan_empty_or_full_line_comments(self):
# type: () -> None
blmark = self.reader.get_mark()
assert blmark.column == 0
blanks = ""
comment = None
mark = None
ch = self.reader.peek()
while True:
# nprint('ch', repr(ch), self.reader.get_mark().column)
if ch in '\r\n\x85\u2028\u2029':
if self.reader.prefix(2) == '\r\n':
self.reader.forward(2)
else:
self.reader.forward()
if comment is not None:
comment += '\n'
self.comments.add_full_line_comment(comment, mark.column, mark.line)
comment = None
else:
blanks += '\n'
self.comments.add_blank_line(blanks, blmark.column, blmark.line) # type: ignore # NOQA
blanks = ""
blmark = self.reader.get_mark()
ch = self.reader.peek()
continue
if comment is None:
if ch in ' \t':
blanks += ch
elif ch == '#':
mark = self.reader.get_mark()
comment = '#'
else:
# xprintf('breaking on', repr(ch))
break
else:
comment += ch
self.reader.forward()
ch = self.reader.peek()
def scan_block_scalar_ignored_line(self, start_mark):
# type: (Any) -> Any
# See the specification for details.
srp = self.reader.peek
srf = self.reader.forward
prefix = ''
comment = None
while srp() == ' ':
prefix += srp()
srf()
if srp() == '#':
comment = ''
mark = self.reader.get_mark()
while srp() not in _THE_END:
comment += srp()
srf()
comment += '\n' # type: ignore
ch = srp()
if ch not in _THE_END:
raise ScannerError(
'while scanning a block scalar',
start_mark,
_F('expected a comment or a line break, but found {ch!r}', ch=ch),
self.reader.get_mark(),
)
if comment is not None:
self.comments.add_eol_comment(comment, mark.column, mark.line) # type: ignore
self.scan_line_break()
return None
| RoundTripScannerSC |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_circulant.py | {
"start": 50732,
"end": 59887
} | class ____(_BaseLinearOperatorCirculant):
"""`LinearOperator` acting like a nested block circulant matrix.
This operator acts like a block circulant matrix `A` with
shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of block circulant matrices
If `A` is nested block circulant, with block sizes `N0, N1, N2`
(`N0 * N1 * N2 = N`):
`A` has a block structure, composed of `N0 x N0` blocks, with each
block an `N1 x N1` block circulant matrix.
For example, with `W`, `X`, `Y`, `Z` each block circulant,
```
A = |W Z Y X|
|X W Z Y|
|Y X W Z|
|Z Y X W|
```
Note that `A` itself will not in general be circulant.
#### Description in terms of the frequency spectrum
There is an equivalent description in terms of the [batch] spectrum `H` and
Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
dimensions.
If `H.shape = [N0, N1, N2]`, (`N0 * N1 * N2 = N`):
Loosely speaking, matrix multiplication is equal to the action of a
Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`.
Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the
`[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and
taking a three dimensional DFT across the first three dimensions. Let `IDFT3`
be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise:
```(A u)_r = IDFT3[ H * (DFT3[u])_r ]```
#### Operator properties deduced from the spectrum.
* This operator is positive definite if and only if `Real{H} > 0`.
A general property of Fourier transforms is the correspondence between
Hermitian functions and real valued transforms.
Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`, we say that `H` is a Hermitian
spectrum if, with `%` meaning modulus division,
```
H[..., n0 % N0, n1 % N1, n2 % N2]
= ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ].
```
* This operator corresponds to a real matrix if and only if `H` is Hermitian.
* This operator is self-adjoint if and only if `H` is real.
See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
### Examples
See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples.
#### Performance
Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(R*N*Log[N])`
* `operator.solve(x)` is `O(R*N*Log[N])`
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
spectrum: tensor.Tensor,
input_output_dtype=dtypes.complex64,
is_non_singular: bool = None,
is_self_adjoint: bool = None,
is_positive_definite: bool = None,
is_square: bool = True,
name="LinearOperatorCirculant3D"):
"""Initialize an `LinearOperatorCirculant`.
This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
by providing `spectrum`, a `[B1,...,Bb, N0, N1, N2]` `Tensor`
with `N0*N1*N2 = N`.
If `input_output_dtype = DTYPE`:
* Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
* Values returned by all methods, such as `matmul` or `determinant` will be
cast to `DTYPE`.
Note that if the spectrum is not Hermitian, then this operator corresponds
to a complex matrix with non-zero imaginary part. In this case, setting
`input_output_dtype` to a real type will forcibly cast the output to be
real, resulting in incorrect results!
If on the other hand the spectrum is Hermitian, then this operator
corresponds to a real-valued matrix, and setting `input_output_dtype` to
a real type is fine.
Args:
spectrum: Shape `[B1,...,Bb, N0, N1, N2]` `Tensor`. Allowed dtypes:
`float16`, `float32`, `float64`, `complex64`, `complex128`.
Type can be different than `input_output_dtype`
input_output_dtype: `dtype` for input/output.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `spectrum` is real, this will always be true.
is_positive_definite: Expect that this operator is positive definite,
meaning the real part of all eigenvalues is positive. We do not require
the operator to be self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name to prepend to all ops created by this class.
"""
parameters = dict(
spectrum=spectrum,
input_output_dtype=input_output_dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
super(LinearOperatorCirculant3D, self).__init__(
spectrum,
block_depth=3,
input_output_dtype=input_output_dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
def _linop_adjoint(self) -> "LinearOperatorCirculant3D":
spectrum = self.spectrum
if spectrum.dtype.is_complex:
spectrum = math_ops.conj(spectrum)
# Conjugating the spectrum is sufficient to get the adjoint.
return LinearOperatorCirculant3D(
spectrum=spectrum,
is_non_singular=self.is_non_singular,
is_self_adjoint=self.is_self_adjoint,
is_positive_definite=self.is_positive_definite,
is_square=True)
def _linop_inverse(self) -> "LinearOperatorCirculant3D":
return LinearOperatorCirculant3D(
spectrum=1. / self.spectrum,
is_non_singular=self.is_non_singular,
is_self_adjoint=self.is_self_adjoint,
is_positive_definite=self.is_positive_definite,
is_square=True,
input_output_dtype=self.dtype)
def _linop_matmul(
self,
left_operator: "LinearOperatorCirculant3D",
right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
if not isinstance(
right_operator, LinearOperatorCirculant3D
) or not isinstance(left_operator, type(right_operator)):
return super()._linop_matmul(left_operator, right_operator)
return LinearOperatorCirculant3D(
spectrum=left_operator.spectrum * right_operator.spectrum,
is_non_singular=property_hint_util.combined_non_singular_hint(
left_operator, right_operator
),
is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
left_operator, right_operator
),
is_positive_definite=(
property_hint_util.combined_commuting_positive_definite_hint(
left_operator, right_operator
)
),
is_square=True,
)
def _linop_solve(
self,
left_operator: "LinearOperatorCirculant3D",
right_operator: linear_operator.LinearOperator,
) -> linear_operator.LinearOperator:
if not isinstance(right_operator, LinearOperatorCirculant3D):
return super()._linop_solve(left_operator, right_operator)
return LinearOperatorCirculant3D(
spectrum=right_operator.spectrum / left_operator.spectrum,
is_non_singular=property_hint_util.combined_non_singular_hint(
left_operator, right_operator),
is_self_adjoint=property_hint_util.combined_commuting_self_adjoint_hint(
left_operator, right_operator),
is_positive_definite=(
property_hint_util.combined_commuting_positive_definite_hint(
left_operator, right_operator)),
is_square=True)
def _to_complex(x):
if x.dtype.is_complex:
return x
dtype = dtypes.complex64
if x.dtype == dtypes.float64:
dtype = dtypes.complex128
return math_ops.cast(x, dtype)
| LinearOperatorCirculant3D |
python | scrapy__scrapy | tests/test_spidermiddleware_httperror.py | {
"start": 452,
"end": 2001
} | class ____(MockServerSpider):
name = "httperror"
bypass_status_codes: set[int] = set()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_urls = [
self.mockserver.url("/status?n=200"),
self.mockserver.url("/status?n=404"),
self.mockserver.url("/status?n=402"),
self.mockserver.url("/status?n=500"),
]
self.failed = set()
self.skipped = set()
self.parsed = set()
async def start(self):
for url in self.start_urls:
yield Request(url, self.parse, errback=self.on_error)
def parse(self, response):
self.parsed.add(response.url[-3:])
def on_error(self, failure):
if isinstance(failure.value, HttpError):
response = failure.value.response
if response.status in self.bypass_status_codes:
self.skipped.add(response.url[-3:])
return self.parse(response)
# it assumes there is a response attached to failure
self.failed.add(failure.value.response.url[-3:])
return failure
req = Request("http://scrapytest.org")
def _response(request: Request, status_code: int) -> Response:
return Response(request.url, status=status_code, request=request)
@pytest.fixture
def res200() -> Response:
return _response(req, 200)
@pytest.fixture
def res402() -> Response:
return _response(req, 402)
@pytest.fixture
def res404() -> Response:
return _response(req, 404)
| _HttpErrorSpider |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 11341,
"end": 11922
} | class ____(Generic[_T]):
def __init__(self, i: Optional[Iterable[_T]] = None) -> None: pass
def __iter__(self) -> Iterator[_T]: pass
def __len__(self) -> int: pass
def add(self, x: _T) -> None: pass
def remove(self, x: _T) -> None: pass
def discard(self, x: _T) -> None: pass
def clear(self) -> None: pass
def pop(self) -> _T: pass
def update(self, x: Iterable[_S]) -> None: pass
def __or__(self, s: Union[Set[_S], FrozenSet[_S]]) -> Set[Union[_T, _S]]: ...
def __xor__(self, s: Union[Set[_S], FrozenSet[_S]]) -> Set[Union[_T, _S]]: ...
| set |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 5880,
"end": 6321
} | class ____(StructTestFunction):
def f(self, x):
return (
-(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
- x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
)
g = None
cons = wrap_constraints(g)
test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
expected_fun=[-959.64066272085051],
expected_x=[512., 404.23180542])
| StructTest5 |
python | doocs__leetcode | solution/2200-2299/2283.Check if Number Has Equal Digit Count and Digit Value/Solution.py | {
"start": 0,
"end": 167
} | class ____:
def digitCount(self, num: str) -> bool:
cnt = Counter(int(x) for x in num)
return all(cnt[i] == int(x) for i, x in enumerate(num))
| Solution |
python | sympy__sympy | sympy/series/order.py | {
"start": 422,
"end": 19558
} | class ____(Expr):
r""" Represents the limiting behavior of some function.
Explanation
===========
The order of a function characterizes the function based on the limiting
behavior of the function as it goes to some limit. Only taking the limit
point to be a number is currently supported. This is expressed in
big O notation [1]_.
The formal definition for the order of a function `g(x)` about a point `a`
is such that `g(x) = O(f(x))` as `x \rightarrow a` if and only if there
exists a `\delta > 0` and an `M > 0` such that `|g(x)| \leq M|f(x)|` for
`|x-a| < \delta`. This is equivalent to `\limsup_{x \rightarrow a}
|g(x)/f(x)| < \infty`.
Let's illustrate it on the following example by taking the expansion of
`\sin(x)` about 0:
.. math ::
\sin(x) = x - x^3/3! + O(x^5)
where in this case `O(x^5) = x^5/5! - x^7/7! + \cdots`. By the definition
of `O`, there is a `\delta > 0` and an `M` such that:
.. math ::
|x^5/5! - x^7/7! + ....| <= M|x^5| \text{ for } |x| < \delta
or by the alternate definition:
.. math ::
\lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| < \infty
which surely is true, because
.. math ::
\lim_{x \rightarrow 0} | (x^5/5! - x^7/7! + ....) / x^5| = 1/5!
As it is usually used, the order of a function can be intuitively thought
of representing all terms of powers greater than the one specified. For
example, `O(x^3)` corresponds to any terms proportional to `x^3,
x^4,\ldots` and any higher power. For a polynomial, this leaves terms
proportional to `x^2`, `x` and constants.
Examples
========
>>> from sympy import O, oo, cos, pi
>>> from sympy.abc import x, y
>>> O(x + x**2)
O(x)
>>> O(x + x**2, (x, 0))
O(x)
>>> O(x + x**2, (x, oo))
O(x**2, (x, oo))
>>> O(1 + x*y)
O(1, x, y)
>>> O(1 + x*y, (x, 0), (y, 0))
O(1, x, y)
>>> O(1 + x*y, (x, oo), (y, oo))
O(x*y, (x, oo), (y, oo))
>>> O(1) in O(1, x)
True
>>> O(1, x) in O(1)
False
>>> O(x) in O(1, x)
True
>>> O(x**2) in O(x)
True
>>> O(x)*x
O(x**2)
>>> O(x) - O(x)
O(x)
>>> O(cos(x))
O(1)
>>> O(cos(x), (x, pi/2))
O(x - pi/2, (x, pi/2))
References
==========
.. [1] `Big O notation <https://en.wikipedia.org/wiki/Big_O_notation>`_
Notes
=====
In ``O(f(x), x)`` the expression ``f(x)`` is assumed to have a leading
term. ``O(f(x), x)`` is automatically transformed to
``O(f(x).as_leading_term(x),x)``.
``O(expr*f(x), x)`` is ``O(f(x), x)``
``O(expr, x)`` is ``O(1)``
``O(0, x)`` is 0.
Multivariate O is also supported:
``O(f(x, y), x, y)`` is transformed to
``O(f(x, y).as_leading_term(x,y).as_leading_term(y), x, y)``
In the multivariate case, it is assumed the limits w.r.t. the various
symbols commute.
If no symbols are passed then all symbols in the expression are used
and the limit point is assumed to be zero.
"""
is_Order = True
__slots__ = ()
@cacheit
def __new__(cls, expr, *args, **kwargs):
expr = sympify(expr)
if not args:
if expr.is_Order:
variables = expr.variables
point = expr.point
else:
variables = list(expr.free_symbols)
point = [S.Zero]*len(variables)
else:
args = list(args if is_sequence(args) else [args])
variables, point = [], []
if is_sequence(args[0]):
for a in args:
v, p = list(map(sympify, a))
variables.append(v)
point.append(p)
else:
variables = list(map(sympify, args))
point = [S.Zero]*len(variables)
if not all(v.is_symbol for v in variables):
raise TypeError('Variables are not symbols, got %s' % variables)
if len(list(uniq(variables))) != len(variables):
raise ValueError('Variables are supposed to be unique symbols, got %s' % variables)
if expr.is_Order:
expr_vp = dict(expr.args[1:])
new_vp = dict(expr_vp)
vp = dict(zip(variables, point))
for v, p in vp.items():
if v in new_vp.keys():
if p != new_vp[v]:
raise NotImplementedError(
"Mixing Order at different points is not supported.")
else:
new_vp[v] = p
if set(expr_vp.keys()) == set(new_vp.keys()):
return expr
else:
variables = list(new_vp.keys())
point = [new_vp[v] for v in variables]
if expr is S.NaN:
return S.NaN
if any(x in p.free_symbols for x in variables for p in point):
raise ValueError('Got %s as a point.' % point)
if variables:
if any(p != point[0] for p in point):
raise NotImplementedError(
"Multivariable orders at different points are not supported.")
if point[0] in (S.Infinity, S.Infinity*S.ImaginaryUnit):
s = {k: 1/Dummy() for k in variables}
rs = {1/v: 1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] in (S.NegativeInfinity, S.NegativeInfinity*S.ImaginaryUnit):
s = {k: -1/Dummy() for k in variables}
rs = {-1/v: -1/k for k, v in s.items()}
ps = [S.Zero for p in point]
elif point[0] is not S.Zero:
s = {k: Dummy() + point[0] for k in variables}
rs = {(v - point[0]).together(): k - point[0] for k, v in s.items()}
ps = [S.Zero for p in point]
else:
s = ()
rs = ()
ps = list(point)
expr = expr.subs(s)
if expr.is_Add:
expr = expr.factor()
if s:
args = tuple([r[0] for r in rs.items()])
else:
args = tuple(variables)
if len(variables) > 1:
# XXX: better way? We need this expand() to
# workaround e.g: expr = x*(x + y).
# (x*(x + y)).as_leading_term(x, y) currently returns
# x*y (wrong order term!). That's why we want to deal with
# expand()'ed expr (handled in "if expr.is_Add" branch below).
expr = expr.expand()
old_expr = None
while old_expr != expr:
old_expr = expr
if expr.is_Add:
lst = expr.extract_leading_order(args)
expr = Add(*[f.expr for (e, f) in lst])
elif expr:
try:
expr = expr.as_leading_term(*args)
except PoleError:
if isinstance(expr, Function) or\
all(isinstance(arg, Function) for arg in expr.args):
# It is not possible to simplify an expression
# containing only functions (which raise error on
# call to leading term) further
pass
else:
orders = []
pts = tuple(zip(args, ps))
for arg in expr.args:
try:
lt = arg.as_leading_term(*args)
except PoleError:
lt = arg
if lt not in args:
order = Order(lt)
else:
order = Order(lt, *pts)
orders.append(order)
if expr.is_Add:
new_expr = Order(Add(*orders), *pts)
if new_expr.is_Add:
new_expr = Order(Add(*[a.expr for a in new_expr.args]), *pts)
expr = new_expr.expr
elif expr.is_Mul:
expr = Mul(*[a.expr for a in orders])
elif expr.is_Pow:
e = expr.exp
b = expr.base
expr = exp(e * log(b))
# It would probably be better to handle this somewhere
# else. This is needed for a testcase in which there is a
# symbol with the assumptions zero=True.
if expr.is_zero:
expr = S.Zero
else:
expr = expr.as_independent(*args, as_Add=False)[1]
expr = expand_power_base(expr)
expr = expand_log(expr)
if len(args) == 1:
# The definition of O(f(x)) symbol explicitly stated that
# the argument of f(x) is irrelevant. That's why we can
# combine some power exponents (only "on top" of the
# expression tree for f(x)), e.g.:
# x**p * (-x)**q -> x**(p+q) for real p, q.
x = args[0]
margs = list(Mul.make_args(
expr.as_independent(x, as_Add=False)[1]))
for i, t in enumerate(margs):
if t.is_Pow:
b, q = t.args
if b in (x, -x) and q.is_real and not q.has(x):
margs[i] = x**q
elif b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
elif b.is_Mul and b.args[0] is S.NegativeOne:
b = -b
if b.is_Pow and not b.exp.has(x):
b, r = b.args
if b in (x, -x) and r.is_real:
margs[i] = x**(r*q)
expr = Mul(*margs)
expr = expr.subs(rs)
if expr.is_Order:
expr = expr.expr
if not expr.has(*variables) and not expr.is_zero:
expr = S.One
# create Order instance:
vp = dict(zip(variables, point))
variables.sort(key=default_sort_key)
point = [vp[v] for v in variables]
args = (expr,) + Tuple(*zip(variables, point))
obj = Expr.__new__(cls, *args)
return obj
def _eval_nseries(self, x, n, logx, cdir=0):
return self
@property
def expr(self):
return self.args[0]
@property
def variables(self):
if self.args[1:]:
return tuple(x[0] for x in self.args[1:])
else:
return ()
@property
def point(self):
if self.args[1:]:
return tuple(x[1] for x in self.args[1:])
else:
return ()
@property
def free_symbols(self):
return self.expr.free_symbols | set(self.variables)
def _eval_power(b, e):
if e.is_Number and e.is_nonnegative:
return b.func(b.expr ** e, *b.args[1:])
if e == O(1):
return b
return
def as_expr_variables(self, order_symbols):
if order_symbols is None:
order_symbols = self.args[1:]
else:
if (not all(o[1] == order_symbols[0][1] for o in order_symbols) and
not all(p == self.point[0] for p in self.point)): # pragma: no cover
raise NotImplementedError('Order at points other than 0 '
'or oo not supported, got %s as a point.' % self.point)
if order_symbols and order_symbols[0][1] != self.point[0]:
raise NotImplementedError(
"Multiplying Order at different points is not supported.")
order_symbols = dict(order_symbols)
for s, p in dict(self.args[1:]).items():
if s not in order_symbols.keys():
order_symbols[s] = p
order_symbols = sorted(order_symbols.items(), key=lambda x: default_sort_key(x[0]))
return self.expr, tuple(order_symbols)
def removeO(self):
return S.Zero
def getO(self):
return self
@cacheit
def contains(self, expr):
r"""
Return True if expr belongs to Order(self.expr, \*self.variables).
Return False if self belongs to expr.
Return None if the inclusion relation cannot be determined
(e.g. when self and expr have different symbols).
"""
expr = sympify(expr)
if expr.is_zero:
return True
if expr is S.NaN:
return False
point = self.point[0] if self.point else S.Zero
if expr.is_Order:
if (any(p != point for p in expr.point) or
any(p != point for p in self.point)):
return None
if expr.expr == self.expr:
# O(1) + O(1), O(1) + O(1, x), etc.
return all(x in self.args[1:] for x in expr.args[1:])
if expr.expr.is_Add:
return all(self.contains(x) for x in expr.expr.args)
if self.expr.is_Add and point.is_zero:
return any(self.func(x, *self.args[1:]).contains(expr)
for x in self.expr.args)
if self.variables and expr.variables:
common_symbols = tuple(
[s for s in self.variables if s in expr.variables])
elif self.variables:
common_symbols = self.variables
else:
common_symbols = expr.variables
if not common_symbols:
return None
if (self.expr.is_Pow and len(self.variables) == 1
and self.variables == expr.variables):
symbol = self.variables[0]
other = expr.expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
from sympy.simplify.powsimp import powsimp
r = None
ratio = self.expr/expr.expr
ratio = powsimp(ratio, deep=True, combine='exp')
for s in common_symbols:
from sympy.series.limits import Limit
l = Limit(ratio, s, point).doit(heuristics=False)
if not isinstance(l, Limit):
l = l != 0
else:
l = None
if r is None:
r = l
else:
if r != l:
return
return r
if self.expr.is_Pow and len(self.variables) == 1:
symbol = self.variables[0]
other = expr.as_independent(symbol, as_Add=False)[1]
if (other.is_Pow and other.base == symbol and
self.expr.base == symbol):
if point.is_zero:
rv = (self.expr.exp - other.exp).is_nonpositive
if point.is_infinite:
rv = (self.expr.exp - other.exp).is_nonnegative
if rv is not None:
return rv
obj = self.func(expr, *self.args[1:])
return self.contains(obj)
def __contains__(self, other):
result = self.contains(other)
if result is None:
raise TypeError('contains did not evaluate to a bool')
return result
def _eval_subs(self, old, new):
if old in self.variables:
newexpr = self.expr.subs(old, new)
i = self.variables.index(old)
newvars = list(self.variables)
newpt = list(self.point)
if new.is_symbol:
newvars[i] = new
else:
syms = new.free_symbols
if len(syms) == 1 or old in syms:
if old in syms:
var = self.variables[i]
else:
var = syms.pop()
# First, try to substitute self.point in the "new"
# expr to see if this is a fixed point.
# E.g. O(y).subs(y, sin(x))
from sympy import limit
if new.has(Order) and limit(new.getO().expr, var, new.getO().point[0]) == self.point[i]:
point = new.getO().point[0]
return Order(newexpr, *zip([var], [point]))
else:
point = new.subs(var, self.point[i])
if point != self.point[i]:
from sympy.solvers.solveset import solveset
d = Dummy()
sol = solveset(old - new.subs(var, d), d)
if isinstance(sol, Complement):
e1 = sol.args[0]
e2 = sol.args[1]
sol = set(e1) - set(e2)
res = [dict(zip((d, ), sol))]
point = d.subs(res[0]).limit(old, self.point[i])
newvars[i] = var
newpt[i] = point
elif old not in syms:
del newvars[i], newpt[i]
if not syms and new == self.point[i]:
newvars.extend(syms)
newpt.extend([S.Zero]*len(syms))
else:
return
return Order(newexpr, *zip(newvars, newpt))
def _eval_conjugate(self):
expr = self.expr._eval_conjugate()
if expr is not None:
return self.func(expr, *self.args[1:])
def _eval_derivative(self, x):
return self.func(self.expr.diff(x), *self.args[1:]) or self
def _eval_transpose(self):
expr = self.expr._eval_transpose()
if expr is not None:
return self.func(expr, *self.args[1:])
def __neg__(self):
return self
O = Order
| Order |
python | python-excel__xlrd | tests/test_cell.py | {
"start": 177,
"end": 1901
} | class ____(unittest.TestCase):
def setUp(self):
self.book = xlrd.open_workbook(from_sample('profiles.xls'), formatting_info=True)
self.sheet = self.book.sheet_by_name('PROFILEDEF')
def test_empty_cell(self):
sheet = self.book.sheet_by_name('TRAVERSALCHAINAGE')
cell = sheet.cell(0, 0)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_EMPTY)
self.assertEqual(cell.value, '')
self.assertEqual(type(cell.value), type(UNICODE_LITERAL('')))
self.assertTrue(cell.xf_index > 0)
def test_string_cell(self):
cell = self.sheet.cell(0, 0)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_TEXT)
self.assertEqual(cell.value, 'PROFIL')
self.assertEqual(type(cell.value), type(UNICODE_LITERAL('')))
self.assertTrue(cell.xf_index > 0)
def test_number_cell(self):
cell = self.sheet.cell(1, 1)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
self.assertEqual(cell.value, 100)
self.assertTrue(cell.xf_index > 0)
def test_calculated_cell(self):
sheet2 = self.book.sheet_by_name('PROFILELEVELS')
cell = sheet2.cell(1, 3)
self.assertEqual(cell.ctype, xlrd.book.XL_CELL_NUMBER)
self.assertAlmostEqual(cell.value, 265.131, places=3)
self.assertTrue(cell.xf_index > 0)
def test_merged_cells(self):
book = xlrd.open_workbook(from_sample('xf_class.xls'), formatting_info=True)
sheet3 = book.sheet_by_name('table2')
row_lo, row_hi, col_lo, col_hi = sheet3.merged_cells[0]
self.assertEqual(sheet3.cell(row_lo, col_lo).value, 'MERGED')
self.assertEqual((row_lo, row_hi, col_lo, col_hi), (3, 7, 2, 5))
| TestCell |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_openapi.py | {
"start": 15104,
"end": 15183
} | class ____(Schema):
offset = fields.Int()
limit = fields.Int()
| PageSchema |
python | doocs__leetcode | solution/1200-1299/1232.Check If It Is a Straight Line/Solution.py | {
"start": 0,
"end": 298
} | class ____:
def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
x1, y1 = coordinates[0]
x2, y2 = coordinates[1]
for x, y in coordinates[2:]:
if (x - x1) * (y2 - y1) != (y - y1) * (x2 - x1):
return False
return True
| Solution |
python | kamyu104__LeetCode-Solutions | Python/jump-game-iv.py | {
"start": 50,
"end": 770
} | class ____(object):
def minJumps(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
groups = collections.defaultdict(list)
for i, x in enumerate(arr):
groups[x].append(i)
q = collections.deque([(0, 0)])
lookup = set([0])
while q:
pos, step = q.popleft()
if pos == len(arr)-1:
break
neighbors = set(groups[arr[pos]] + [pos-1, pos+1])
groups[arr[pos]] = []
for p in neighbors:
if p in lookup or not 0 <= p < len(arr):
continue
lookup.add(p)
q.append((p, step+1))
return step
| Solution |
python | redis__redis-py | redis/exceptions.py | {
"start": 550,
"end": 597
} | class ____(ResponseError):
pass
| NoScriptError |
python | getsentry__sentry | tests/sentry/integrations/jira_server/test_integration.py | {
"start": 45833,
"end": 51095
} | class ____(JiraServerIntegrationBaseTest):
def test_update_organization_config_sync_keys(self) -> None:
integration = self.create_provider_integration(provider="jira_server", name="Example Jira")
integration.add_organization(self.organization, self.user)
installation = integration.get_installation(self.organization.id)
# test validation
data = {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": {10100: {"on_resolve": "", "on_unresolve": "3"}},
}
with pytest.raises(IntegrationError):
installation.update_organization_config(data)
data = {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": {10100: {"on_resolve": "4", "on_unresolve": "3"}},
}
installation.update_organization_config(data)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.config == {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": True,
}
assert IntegrationExternalProject.objects.filter(
organization_integration_id=org_integration.id,
resolved_status="4",
unresolved_status="3",
).exists()
# test update existing
data = {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": {10100: {"on_resolve": "4", "on_unresolve": "5"}},
}
installation.update_organization_config(data)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.config == {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": True,
}
assert IntegrationExternalProject.objects.filter(
organization_integration_id=org_integration.id,
resolved_status="4",
unresolved_status="5",
).exists()
assert (
IntegrationExternalProject.objects.filter(
organization_integration_id=org_integration.id
).count()
== 1
)
# test disable forward
data = {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": {},
}
installation.update_organization_config(data)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.config == {
"sync_comments": True,
"sync_forward_assignment": True,
"sync_reverse_assignment": True,
"sync_status_reverse": True,
"sync_status_forward": False,
}
assert (
IntegrationExternalProject.objects.filter(
organization_integration_id=org_integration.id
).count()
== 0
)
def test_update_organization_config_issues_keys(self) -> None:
integration = self.create_provider_integration(provider="jira_server", name="Example Jira")
integration.add_organization(self.organization, self.user)
installation = integration.get_installation(self.organization.id)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert "issues_ignored_fields" not in org_integration.config
# Parses user-supplied CSV
installation.update_organization_config(
{"issues_ignored_fields": "\nhello world ,,\ngoodnight\nmoon , ,"}
)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.config.get("issues_ignored_fields") == [
"hello world",
"goodnight",
"moon",
]
# No-ops if updated value is not specified
installation.update_organization_config({})
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.config.get("issues_ignored_fields") == [
"hello world",
"goodnight",
"moon",
]
| JiraServerControlIntegrationTest |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 10414,
"end": 10959
} | class ____:
@property
def xtype(self):
"""Extended data type information."""
return self._xtype
@xtype.setter
def xtype(self, xtype):
if xtype is not None and not self._config.get("version_1_2_or_later"):
warn_or_raise(
W28, W28, ("xtype", self._element_name, "1.2"), self._config, self._pos
)
check_string(xtype, "xtype", self._config, self._pos)
self._xtype = xtype
@xtype.deleter
def xtype(self):
self._xtype = None
| _XtypeProperty |
python | kamyu104__LeetCode-Solutions | Python/split-and-merge-array-transformation.py | {
"start": 1311,
"end": 2440
} | class ____(object):
def minSplitMerge(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
def bfs(start, target):
def adj(arr):
for l in xrange(len(arr)):
for r in xrange(l, len(arr)):
sub = arr[l:r+1]
rem = arr[:l]+arr[r+1:]
for i in xrange(len(rem)+1):
if i == l:
continue
yield rem[:i]+sub+rem[i:]
d = 0
lookup = {start}
q = [start]
while q:
new_q = []
for u in q:
if u == target:
return d
for v in adj(u):
if v in lookup:
continue
lookup.add(v)
new_q.append(v)
q = new_q
d += 1
return -1
return bfs(tuple(nums1), tuple(nums2))
| Solution2 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/definition/asset_graph_computation.py | {
"start": 10694,
"end": 12740
} | class ____:
"""A graph where each node is a NodeOutputHandle corresponding to an op. There's an edge from
op_output_1 to op_output_2 if op_output_2 is part of an op that has an input that's connected to
op_output_1.
"""
op_output_handles: AbstractSet[NodeOutputHandle]
upstream: Mapping[NodeOutputHandle, AbstractSet[NodeOutputHandle]]
downstream: Mapping[NodeOutputHandle, AbstractSet[NodeOutputHandle]]
@staticmethod
def from_graph(graph_def: "GraphDefinition") -> "OpOutputHandleGraph":
op_output_handles = graph_def.get_op_output_handles(None)
input_output_pairs = graph_def.get_op_input_output_handle_pairs(None)
op_output_handles_by_op_handle: dict[NodeHandle, set[NodeOutputHandle]] = defaultdict(set)
for op_output_handle in op_output_handles:
op_output_handles_by_op_handle[op_output_handle.node_handle].add(op_output_handle)
downstream_op_output_handles_by_op_output_handle: dict[
NodeOutputHandle, set[NodeOutputHandle]
] = defaultdict(set)
upstream_op_output_handles_by_op_output_handle: dict[
NodeOutputHandle, set[NodeOutputHandle]
] = defaultdict(set)
for op_output_handle, op_input_handle in input_output_pairs:
downstream_op_output_handles = op_output_handles_by_op_handle[
op_input_handle.node_handle
]
for downstream_op_output_handle in downstream_op_output_handles:
upstream_op_output_handles_by_op_output_handle[downstream_op_output_handle].add(
op_output_handle
)
downstream_op_output_handles_by_op_output_handle[op_output_handle].add(
downstream_op_output_handle
)
return OpOutputHandleGraph(
op_output_handles=op_output_handles,
downstream=downstream_op_output_handles_by_op_output_handle,
upstream=upstream_op_output_handles_by_op_output_handle,
)
| OpOutputHandleGraph |
python | getsentry__sentry | src/sentry/deletions/defaults/pullrequest.py | {
"start": 256,
"end": 823
} | class ____(ModelDeletionTask[PullRequest]):
def get_query_filter(self) -> Q:
"""
Returns a Q object that filters for unused PRs.
"""
cutoff = datetime.now(timezone.utc) - timedelta(days=90)
return PullRequest.get_unused_filter(cutoff)
def get_child_relations(self, instance: PullRequest) -> list[BaseRelation]:
return [
ModelRelation(PullRequestComment, {"pull_request_id": instance.id}),
ModelRelation(PullRequestCommit, {"pull_request_id": instance.id}),
]
| PullRequestDeletionTask |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 323283,
"end": 332114
} | class ____:
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0, 2], [0, 2])
assert_equal(F, 0.0)
assert_equal(p, 1.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0, 2], [2, 4])
assert_equal(F, 2.0)
assert_allclose(p, 1 - np.sqrt(0.5), rtol=1e-14)
def test_unequal_var(self):
# toy samples with unequal variances and different observations
samples = [[-50.42, 40.31, -18.09, 35.58, -6.8, 0.22],
[23.44, 4.5, 15.1, 9.66],
[11.94, 11.1 , 9.87, 9.09, 3.33]]
F, p = stats.f_oneway(*samples, equal_var=False)
# R language as benchmark
# group1 <- c(-50.42, 40.31, -18.09, 35.58, -6.8, 0.22)
# group2 <- c(23.44, 4.5, 15.1, 9.66)
# group3 <- c(11.94, 11.1 , 9.87, 9.09, 3.33)
#
# data <- data.frame(
# value = c(group1, group2, group3),
# group = factor(c(rep("G1", length(group1)),
# rep("G2", length(group2)),
# rep("G3", length(group3))))
# )
# welch_anova <- oneway.test(value ~ group, data = data, var.equal = FALSE)
# welch_anova$statistic
## F: 0.609740409019517
# welch_anova$p.value
## 0.574838941286302
assert_allclose(F, 0.609740409019517, rtol=1e-14)
assert_allclose(p, 0.574838941286302, rtol=1e-14)
def test_equal_var_input_validation(self):
samples = [[-50.42, 40.31, -18.09, 35.58, -6.8, 0.22],
[23.44, 4.5, 15.1, 9.66],
[11.94, 11.1 , 9.87, 9.09, 3.33]]
message = "Expected a boolean value for 'equal_var'"
with pytest.raises(TypeError, match=message):
stats.f_oneway(*samples, equal_var="False")
def test_known_exact(self):
# Another trivial dataset for which the exact F and p can be
# calculated on most platforms
F, p = stats.f_oneway([2], [2], [2, 3, 4])
assert_allclose(F, 3/5, rtol=1e-15) # assert_equal fails on some CI platforms
assert_allclose(p, 5/8, rtol=1e-15)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
# The expected value was verified by computing it with mpmath with
# 40 digits of precision.
assert_allclose(F, 0.77450216931805540, rtol=1e-14)
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = stats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nist(self):
# These are the nist ANOVA files. They can be found at:
# https://www.itl.nist.gov/div898/strd/anova/anova.html
filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
for test_case in filenames:
rtol = 1e-7
fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/nist_anova', test_case))
with open(fname) as f:
content = f.read().split('\n')
certified = [line.split() for line in content[40:48]
if line.strip()]
dataf = np.loadtxt(fname, skiprows=60)
y, x = dataf.T
y = y.astype(int)
caty = np.unique(y)
f = float(certified[0][-1])
xlist = [x[y == i] for i in caty]
res = stats.f_oneway(*xlist)
# With the hard test cases we relax the tolerance a bit.
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
if test_case in hard_tc:
rtol = 1e-4
assert_allclose(res[0], f, rtol=rtol,
err_msg=f'Failing testcase: {test_case}')
@pytest.mark.parametrize("a, b, expected", [
(np.array([42, 42, 42]), np.array([7, 7, 7]), (np.inf, 0)),
(np.array([42, 42, 42]), np.array([42, 42, 42]), (np.nan, np.nan))
])
def test_constant_input(self, a, b, expected):
# For more details, look on https://github.com/scipy/scipy/issues/11669
msg = "Each of the input arrays is constant;"
with pytest.warns(stats.ConstantInputWarning, match=msg):
f, p = stats.f_oneway(a, b)
assert f, p == expected
@pytest.mark.parametrize('axis', [-2, -1, 0, 1])
def test_2d_inputs(self, axis):
a = np.array([[1, 4, 3, 3],
[2, 5, 3, 3],
[3, 6, 3, 3],
[2, 3, 3, 3],
[1, 4, 3, 3]])
b = np.array([[3, 1, 5, 3],
[4, 6, 5, 3],
[4, 3, 5, 3],
[1, 5, 5, 3],
[5, 5, 5, 3],
[2, 3, 5, 3],
[8, 2, 5, 3],
[2, 2, 5, 3]])
c = np.array([[4, 3, 4, 3],
[4, 2, 4, 3],
[5, 4, 4, 3],
[5, 4, 4, 3]])
if axis in [-1, 1]:
a = a.T
b = b.T
c = c.T
take_axis = 0
else:
take_axis = 1
warn_msg = "Each of the input arrays is constant;"
with pytest.warns(stats.ConstantInputWarning, match=warn_msg):
f, p = stats.f_oneway(a, b, c, axis=axis)
# Verify that the result computed with the 2d arrays matches
# the result of calling f_oneway individually on each slice.
for j in [0, 1]:
fj, pj = stats.f_oneway(np.take(a, j, take_axis),
np.take(b, j, take_axis),
np.take(c, j, take_axis))
assert_allclose(f[j], fj, rtol=1e-14)
assert_allclose(p[j], pj, rtol=1e-14)
for j in [2, 3]:
with pytest.warns(stats.ConstantInputWarning, match=warn_msg):
fj, pj = stats.f_oneway(np.take(a, j, take_axis),
np.take(b, j, take_axis),
np.take(c, j, take_axis))
assert_equal(f[j], fj)
assert_equal(p[j], pj)
def test_3d_inputs(self):
# Some 3-d arrays. (There is nothing special about the values.)
a = 1/np.arange(1.0, 4*5*7 + 1).reshape(4, 5, 7)
b = 2/np.arange(1.0, 4*8*7 + 1).reshape(4, 8, 7)
c = np.cos(1/np.arange(1.0, 4*4*7 + 1).reshape(4, 4, 7))
f, p = stats.f_oneway(a, b, c, axis=1)
assert f.shape == (4, 7)
assert p.shape == (4, 7)
for i in range(a.shape[0]):
for j in range(a.shape[2]):
fij, pij = stats.f_oneway(a[i, :, j], b[i, :, j], c[i, :, j])
assert_allclose(fij, f[i, j])
assert_allclose(pij, p[i, j])
def test_length0_1d_error(self):
# Require at least one value in each group.
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
result = stats.f_oneway([1, 2, 3], [], [4, 5, 6, 7])
assert_equal(result, (np.nan, np.nan))
def test_length0_2d_error(self):
with pytest.warns(SmallSampleWarning, match=too_small_nd_not_omit):
ncols = 3
a = np.ones((4, ncols))
b = np.ones((0, ncols))
c = np.ones((5, ncols))
f, p = stats.f_oneway(a, b, c)
nans = np.full((ncols,), fill_value=np.nan)
assert_equal(f, nans)
assert_equal(p, nans)
def test_all_length_one(self):
with pytest.warns(SmallSampleWarning):
result = stats.f_oneway([10], [11], [12], [13])
assert_equal(result, (np.nan, np.nan))
@pytest.mark.parametrize('args', [(), ([1, 2, 3],)])
def test_too_few_inputs(self, args):
message = "At least two samples are required..."
with assert_raises(TypeError, match=message):
stats.f_oneway(*args)
def test_axis_error(self):
a = np.ones((3, 4))
b = np.ones((5, 4))
with assert_raises(AxisError):
stats.f_oneway(a, b, axis=2)
def test_bad_shapes(self):
a = np.ones((3, 4))
b = np.ones((5, 4))
with assert_raises(ValueError):
stats.f_oneway(a, b, axis=1)
@make_xp_test_case(stats.kruskal)
| TestFOneWay |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gcs/source_gcs/config.py | {
"start": 1059,
"end": 1626
} | class ____(BaseModel):
class Config(OneOfOptionConfig):
title = "Service Account Authentication."
auth_type: Literal["Service"] = Field("Service", const=True)
service_account: str = Field(
title="Service Account Information.",
airbyte_secret=True,
description=(
'Enter your Google Cloud <a href="https://cloud.google.com/iam/docs/'
'creating-managing-service-account-keys#creating_service_account_keys">'
"service account key</a> in JSON format"
),
)
| ServiceAccountCredentials |
python | scipy__scipy | scipy/spatial/tests/test_distance.py | {
"start": 28092,
"end": 57809
} | class ____:
def setup_method(self):
self.rnd_eo_names = ['random-float32-data', 'random-int-data',
'random-uint-data', 'random-double-data',
'random-bool-data']
self.valid_upcasts = {'bool': [np_ulong, np_long, np.float32, np.float64],
'uint': [np_long, np.float32, np.float64],
'int': [np.float32, np.float64],
'float32': [np.float64]}
def test_pdist_extra_args(self, metric):
# Tests that args and kwargs are correctly handled
X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]
kwargs = {"N0tV4l1D_p4raM": 3.14, "w": np.arange(2)}
args = [3.14] * 200
with pytest.raises(TypeError):
pdist(X1, metric=metric, **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric=eval(metric), **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric="test_" + metric, **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric=metric, *args)
with pytest.raises(TypeError):
pdist(X1, metric=eval(metric), *args)
with pytest.raises(TypeError):
pdist(X1, metric="test_" + metric, *args)
def test_pdist_extra_args_custom(self):
# Tests that args and kwargs are correctly handled
# also for custom metric
def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
return arg + kwarg + kwarg2
X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]
kwargs = {"N0tV4l1D_p4raM": 3.14, "w": np.arange(2)}
args = [3.14] * 200
with pytest.raises(TypeError):
pdist(X1, _my_metric)
with pytest.raises(TypeError):
pdist(X1, _my_metric, *args)
with pytest.raises(TypeError):
pdist(X1, _my_metric, **kwargs)
with pytest.raises(TypeError):
pdist(X1, _my_metric, kwarg=2.2, kwarg2=3.3)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1, 2, kwarg=2.2)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1, 2, kwarg=2.2)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1.1, 2.2, 3.3)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1.1, 2.2)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1.1)
with pytest.raises(TypeError):
pdist(X1, _my_metric, 1.1, kwarg=2.2, kwarg2=3.3)
# these should work
assert_allclose(pdist(X1, metric=_my_metric,
arg=1.1, kwarg2=3.3), 5.4)
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_euclidean_iris_double(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_euclidean_iris_float32(self):
eps = 1e-5
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
@pytest.mark.slow
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_seuclidean_random(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-7
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
# Check no error is raise when V has float32 dtype (#11171).
V = np.var(X, axis=0, ddof=1)
Y_test2 = pdist(X, 'seuclidean', V=V)
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_seuclidean_random_nonC(self):
# Test pdist(X, 'test_sqeuclidean') [the non-C implementation]
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test2 = pdist(X, 'test_seuclidean')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-5
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test2 = pdist(X, 'test_seuclidean')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_cosine_random(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-7
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = wpdist(X, 'test_cosine')
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_cosine_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
assert_allclose(Y_test1, Y_right, atol=eps)
@pytest.mark.slow
def test_pdist_cosine_iris_float32(self):
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
assert_allclose(Y_test1, Y_right, atol=eps, verbose=verbose > 2)
@pytest.mark.slow
def test_pdist_cosine_iris_nonC(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = wpdist(X, 'test_cosine')
assert_allclose(Y_test2, Y_right, atol=eps)
def test_pdist_cosine_bounds(self):
# Test adapted from @joernhees's example at gh-5208: case where
# cosine distance used to be negative. XXX: very sensitive to the
# specific norm computation.
x = np.abs(np.random.RandomState(1337).rand(91))
X = np.vstack([x, x])
assert_(wpdist(X, 'cosine')[0] >= 0,
msg='cosine distance should be non-negative')
def test_pdist_cityblock_random(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-7
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_cityblock_iris_float32(self):
eps = 1e-5
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
@pytest.mark.slow
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_correlation_random(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-7
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = wpdist(X, 'test_correlation')
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_correlation_iris(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = wpdist(X, 'correlation')
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_correlation_iris_float32(self):
eps = 1e-7
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = wpdist(X, 'correlation')
assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
@pytest.mark.slow
def test_pdist_correlation_iris_nonC(self):
if sys.maxsize > 2**32:
eps = 1e-7
else:
pytest.skip("see gh-16456")
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = wpdist(X, 'test_correlation')
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.parametrize("p", [0.1, 0.25, 1.0, 2.0, 3.2, np.inf])
def test_pdist_minkowski_random_p(self, p):
eps = 1e-13
X = eo['pdist-double-inp']
Y1 = wpdist_no_const(X, 'minkowski', p=p)
Y2 = wpdist_no_const(X, 'test_minkowski', p=p)
assert_allclose(Y1, Y2, atol=0, rtol=eps)
def test_pdist_minkowski_random(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-7
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-7
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-5
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
assert_allclose(Y_test2, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
assert_allclose(Y_test1, Y_right, rtol=eps)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-5
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-7
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8)
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_mahalanobis(self):
# 1-dimensional observations
x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
dist = pdist(x, metric='mahalanobis')
assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
# 2-dimensional observations
x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
dist = pdist(x, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2])
# Too few observations
with pytest.raises(ValueError):
wpdist([[0, 1], [2, 3]], metric='mahalanobis')
def test_pdist_hamming_random(self):
eps = 1e-15
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-15
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-15
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_dhamming_random(self):
eps = 1e-15
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-15
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-15
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_jensenshannon_random(self):
eps = 1e-11
X = eo['pdist-double-inp']
Y_right = eo['pdist-jensenshannon']
Y_test1 = pdist(X, 'jensenshannon')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_jensenshannon_random_float32(self):
eps = 1e-8
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-jensenshannon']
Y_test1 = pdist(X, 'jensenshannon')
assert_allclose(Y_test1, Y_right, rtol=eps, verbose=verbose > 2)
def test_pdist_jensenshannon_random_nonC(self):
eps = 1e-11
X = eo['pdist-double-inp']
Y_right = eo['pdist-jensenshannon']
Y_test2 = pdist(X, 'test_jensenshannon')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_jensenshannon_iris(self):
if _is_32bit():
# Test failing on 32-bit Linux on Azure otherwise, see gh-12810
eps = 2.5e-10
else:
eps = 1e-12
X = eo['iris']
Y_right = eo['pdist-jensenshannon-iris']
Y_test1 = pdist(X, 'jensenshannon')
assert_allclose(Y_test1, Y_right, atol=eps)
def test_pdist_jensenshannon_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-jensenshannon-iris']
Y_test1 = pdist(X, 'jensenshannon')
assert_allclose(Y_test1, Y_right, atol=eps, verbose=verbose > 2)
def test_pdist_jensenshannon_iris_nonC(self):
eps = 5e-5
X = eo['iris']
Y_right = eo['pdist-jensenshannon-iris']
Y_test2 = pdist(X, 'test_jensenshannon')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = wmatching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = wmatching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wmatching(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_yule_mtica1(self):
m = wyule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = wyule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wyule(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_dice_mtica1(self):
m = wdice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 7, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = wdice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wdice(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = wsokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica1(self):
m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = wrogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica1(self):
m = wrussellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = wrussellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrussellrao(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
@pytest.mark.slow
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-15
y1 = wpdist_no_const(D, "canberra")
y2 = wpdist_no_const(D, "test_canberra")
assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = wpdist_no_const(([3.3], [3.4]), "canberra")
right_y = 0.01492537
assert_allclose(pdist_y, right_y, atol=eps, verbose=verbose > 2)
@skip_xp_invalid_arg
def test_pdist_custom_notdouble(self):
# tests that when using a custom metric the data type is not altered
class myclass:
pass
def _my_metric(x, y):
if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
raise ValueError("Type has been changed")
return 1.123
data = np.array([[myclass()], [myclass()]], dtype=object)
pdist_y = pdist(data, metric=_my_metric)
right_y = 1.123
assert_equal(pdist_y, right_y, verbose=verbose > 2)
def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs):
# helper function for test_pdist_calling_conventions
try:
y1 = pdist(X, metric=metric, **kwargs)
y2 = pdist(X, metric=eval(metric), **kwargs)
y3 = pdist(X, metric="test_" + metric, **kwargs)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
with pytest.raises(e_cls):
pdist(X, metric=metric, **kwargs)
with pytest.raises(e_cls):
pdist(X, metric=eval(metric), **kwargs)
with pytest.raises(e_cls):
pdist(X, metric="test_" + metric, **kwargs)
else:
assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
assert_allclose(y1, y3, rtol=eps, verbose=verbose > 2)
def test_pdist_calling_conventions(self, metric):
# Ensures that specifying the metric with a str or scipy function
# gives the same behaviour (i.e. same result or same exception).
# NOTE: The correctness should be checked within each metric tests.
# NOTE: Extra args should be checked with a dedicated test
for eo_name in self.rnd_eo_names:
# subsampling input data to speed-up tests
# NOTE: num samples needs to be > than dimensions for mahalanobis
X = eo[eo_name][::5, ::2]
if verbose > 2:
print("testing: ", metric, " with: ", eo_name)
if metric in {'dice', 'yule', 'matching', 'rogerstanimoto', 'russellrao',
'sokalsneath'} and 'bool' not in eo_name:
# python version permits non-bools e.g. for fuzzy logic
continue
self._check_calling_conventions(X, metric)
# Testing built-in metrics with extra args
if metric == "seuclidean":
V = np.var(X.astype(np.float64), axis=0, ddof=1)
self._check_calling_conventions(X, metric, V=V)
elif metric == "mahalanobis":
V = np.atleast_2d(np.cov(X.astype(np.float64).T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X, metric, VI=VI)
def test_pdist_dtype_equivalence(self, metric):
# Tests that the result is not affected by type up-casting
eps = 1e-07
tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
(eo['random-uint-data'], self.valid_upcasts['uint']),
(eo['random-int-data'], self.valid_upcasts['int']),
(eo['random-float32-data'], self.valid_upcasts['float32'])]
for test in tests:
X1 = test[0][::5, ::2]
try:
y1 = pdist(X1, metric=metric)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
for new_type in test[1]:
X2 = new_type(X1)
with pytest.raises(e_cls):
pdist(X2, metric=metric)
else:
for new_type in test[1]:
y2 = pdist(new_type(X1), metric=metric)
assert_allclose(y1, y2, rtol=eps, verbose=verbose > 2)
def test_pdist_out(self, metric):
# Test that out parameter works properly
eps = 1e-15
X = eo['random-float32-data'][::5, ::2]
out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2)
kwargs = dict()
if metric == 'minkowski':
kwargs['p'] = 1.23
out1 = np.empty(out_size, dtype=np.float64)
Y_right = pdist(X, metric, **kwargs)
Y_test1 = pdist(X, metric, out=out1, **kwargs)
# test that output is numerically equivalent
assert_allclose(Y_test1, Y_right, rtol=eps)
# test that Y_test1 and out1 are the same object
assert_(Y_test1 is out1)
# test for incorrect shape
out2 = np.empty(out_size + 3, dtype=np.float64)
with pytest.raises(ValueError):
pdist(X, metric, out=out2, **kwargs)
# test for (C-)contiguous output
out3 = np.empty(2 * out_size, dtype=np.float64)[::2]
with pytest.raises(ValueError):
pdist(X, metric, out=out3, **kwargs)
# test for incorrect dtype
out5 = np.empty(out_size, dtype=np.int64)
with pytest.raises(ValueError):
pdist(X, metric, out=out5, **kwargs)
def test_striding(self, metric):
# test that striding is handled correct with calls to
# _copy_array_if_base_present
eps = 1e-15
X = eo['random-float32-data'][::5, ::2]
X_copy = X.copy()
# confirm contiguity
assert_(not X.flags.c_contiguous)
assert_(X_copy.flags.c_contiguous)
kwargs = dict()
if metric == 'minkowski':
kwargs['p'] = 1.23
Y1 = pdist(X, metric, **kwargs)
Y2 = pdist(X_copy, metric, **kwargs)
# test that output is numerically equivalent
assert_allclose(Y1, Y2, rtol=eps, verbose=verbose > 2)
| TestPdist |
python | keon__algorithms | tests/test_sort.py | {
"start": 3449,
"end": 4137
} | class ____(unittest.TestCase):
def setUp(self):
self.depGraph = {
"a": ["b"],
"b": ["c"],
"c": ['e'],
'e': ['g'],
"d": [],
"f": ["e", "d"],
"g": []
}
def test_topsort(self):
res = top_sort_recursive(self.depGraph)
# print(res)
self.assertTrue(res.index('g') < res.index('e'))
res = top_sort(self.depGraph)
self.assertTrue(res.index('g') < res.index('e'))
if __name__ == "__main__":
unittest.main()
| TestTopSort |
python | neetcode-gh__leetcode | python/1582-special-positions-in-a-binary-matrix.py | {
"start": 0,
"end": 545
} | class ____:
def numSpecial(self, mat: List[List[int]]) -> int:
m = len(mat)
n = len(mat[0])
rowCount = [0] * m
colCount = [0] * n
res = 0
for r in range(m):
for c in range(n):
if mat[r][c] == 1:
rowCount[r] += 1
colCount[c] += 1
for r in range(m):
for c in range(n):
if mat[r][c] == 1 and rowCount[r] == 1 and colCount[c] == 1:
res += 1
return res
| Solution |
python | spyder-ide__spyder | spyder/widgets/findreplace.py | {
"start": 1623,
"end": 31555
} | class ____(QWidget, SpyderShortcutsMixin):
"""Find widget"""
# For shortcuts
CONF_SECTION = 'find_replace'
TOOLTIP = {
'regexp_error': _("Regular expression error"),
'no_matches': _("No matches")
}
visibility_changed = Signal(bool)
return_shift_pressed = Signal()
return_pressed = Signal()
def __init__(self, parent, enable_replace=False):
if not PYSIDE2:
super().__init__(parent)
else:
QWidget.__init__(self, parent)
SpyderShortcutsMixin.__init__(self)
self.enable_replace = enable_replace
self.editor = None
self.is_code_editor = None
glayout = QGridLayout()
glayout.setContentsMargins(
2 * AppStyle.MarginSize,
AppStyle.MarginSize,
2 * AppStyle.MarginSize,
0
)
if sys.platform == "darwin":
# Spacing is too big on Mac, which makes the widget look bad
glayout.setSpacing(2 * AppStyle.MarginSize)
self.setLayout(glayout)
self.close_button = create_toolbutton(
self,
triggered=self.hide,
icon=ima.icon('DialogCloseButton')
)
glayout.addWidget(self.close_button, 0, 0)
# Icon size is the same for all buttons
self.icon_size = self.close_button.iconSize()
# Find layout
self.search_text = SearchText(self)
self.return_shift_pressed.connect(
lambda:
self.find(changed=False, forward=False, rehighlight=False,
multiline_replace_check = False)
)
self.return_pressed.connect(
lambda:
self.find(changed=False, forward=True, rehighlight=False,
multiline_replace_check = False)
)
self.search_text.lineEdit().textEdited.connect(
self.text_has_been_edited)
self.search_text.sig_resized.connect(self._resize_replace_text)
self.number_matches_text = QLabel(self)
self.search_text.lineEdit().clear_action.triggered.connect(
self.clear_matches
)
self.hide_number_matches_text = False
self.number_matches_pixmap = (
ima.icon('number_matches').pixmap(self.icon_size)
)
self.matches_string = ""
self.no_matches_icon = ima.icon('no_matches')
self.error_icon = ima.icon('error')
self.messages_action = QAction(self)
self.messages_action.setVisible(False)
self.search_text.lineEdit().addAction(
self.messages_action, QLineEdit.TrailingPosition)
# Button corresponding to the messages_action above
self.messages_button = (
self.search_text.lineEdit().findChildren(QToolButton)[1]
)
self.replace_on = False
self.replace_text_button = create_toolbutton(
self,
toggled=self.change_replace_state,
icon=ima.icon('replace'),
tip=_("Replace text")
)
if not self.enable_replace:
self.replace_text_button.hide()
self.previous_button = create_toolbutton(
self,
triggered=self.find_previous,
icon=ima.icon('findprevious'),
tip=_("Find previous")
)
self.next_button = create_toolbutton(
self,
triggered=self.find_next,
icon=ima.icon('findnext'),
tip=_("Find next")
)
self.next_button.clicked.connect(self.update_search_combo)
self.previous_button.clicked.connect(self.update_search_combo)
self.re_button = create_toolbutton(
self, icon=ima.icon('regex'),
tip=_("Use regular expressions")
)
self.re_button.setCheckable(True)
self.re_button.toggled.connect(lambda state: self.find())
self.case_button = create_toolbutton(
self,
icon=ima.icon("format_letter_case"),
tip=_("Enable case sensitive searches")
)
self.case_button.setCheckable(True)
self.case_button.toggled.connect(lambda state: self.find())
self.words_button = create_toolbutton(
self,
icon=ima.icon("format_letter_matches"),
tip=_("Only search for whole words")
)
self.words_button.setCheckable(True)
self.words_button.toggled.connect(lambda state: self.find())
self.widgets = [
self.close_button,
self.search_text,
self.previous_button,
self.next_button,
self.re_button,
self.case_button,
self.words_button,
self.replace_text_button,
self.number_matches_text,
]
# Search layout
search_layout = QHBoxLayout()
for widget in self.widgets[1:-1]:
search_layout.addWidget(widget)
search_layout.addSpacerItem(QSpacerItem(10, 0))
search_layout.addWidget(self.number_matches_text)
search_layout.addSpacerItem(
QSpacerItem(6, 0, QSizePolicy.Expanding)
)
glayout.addLayout(search_layout, 0, 1)
# Replace layout
self.replace_text = PatternComboBox(
self,
adjust_to_minimum=False
)
self.replace_text.valid.connect(
lambda _: self.replace_find(focus_replace_text=True))
self.replace_text.lineEdit().setPlaceholderText(_("Replace"))
self.replace_button = create_toolbutton(
self,
tip=_('Replace next occurrence'),
icon=ima.icon('replace_next'),
triggered=self.replace_find,
)
self.replace_sel_button = create_toolbutton(
self,
tip=_('Replace occurrences in selection'),
icon=ima.icon('replace_selection'),
triggered=self.replace_find_selection,
)
self.replace_sel_button.clicked.connect(self.update_replace_combo)
self.replace_sel_button.clicked.connect(self.update_search_combo)
self.replace_all_button = create_toolbutton(
self,
tip=_('Replace all occurrences'),
icon=ima.icon('replace_all'),
triggered=self.replace_find_all,
)
self.replace_all_button.clicked.connect(self.update_replace_combo)
self.replace_all_button.clicked.connect(self.update_search_combo)
replace_layout = QHBoxLayout()
widgets = [
self.replace_text,
self.replace_button,
self.replace_sel_button,
self.replace_all_button
]
for widget in widgets:
replace_layout.addWidget(widget)
replace_layout.addStretch(1)
glayout.addLayout(replace_layout, 1, 1)
self.widgets.extend(widgets)
self.replace_widgets = widgets
self.hide_replace()
# Additional adjustments
self.search_text.setTabOrder(self.search_text, self.replace_text)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.register_shortcuts(parent)
# To highlight found results in the editor
self.highlight_timer = QTimer(self)
self.highlight_timer.setSingleShot(True)
self.highlight_timer.setInterval(300)
self.highlight_timer.timeout.connect(self.highlight_matches)
# Install event filter for search_text
self.search_text.installEventFilter(self)
# To avoid painting number_matches_text on every resize event
self.show_matches_timer = QTimer(self)
self.show_matches_timer.setSingleShot(True)
self.show_matches_timer.setInterval(25)
self.show_matches_timer.timeout.connect(self.show_matches)
def eventFilter(self, widget, event):
"""
Event filter for search_text widget.
Notes
-----
* Emit signals when Enter and Shift+Enter are pressed. These signals
are used for search forward and backward.
* Add crude hack to get tab working between the find/replace boxes.
* Reduce space between the messages_button and the clear one.
"""
# Type check: Prevent error in PySide where 'event' may be of type
# QtGui.QPainter (for whatever reason).
if not isinstance(event, QEvent):
return True
if event.type() == QEvent.KeyPress:
key = event.key()
shift = event.modifiers() & Qt.ShiftModifier
if key == Qt.Key_Return:
if shift:
self.return_shift_pressed.emit()
else:
self.return_pressed.emit()
if key == Qt.Key_Tab:
if self.search_text.hasFocus():
self.replace_text.set_current_text(
self.search_text.currentText())
self.focusNextChild()
if event.type() == QEvent.Paint:
self.messages_button.move(
self.search_text.lineEdit().width() - 42,
self.messages_button.y()
)
return super().eventFilter(widget, event)
def register_shortcuts(self, parent):
"""Register shortcuts for this widget."""
shortcuts = (
('find next', self.find_next, parent),
("find previous", self.find_previous, parent),
('find text', self.show, parent),
('replace text', self.show_replace, parent),
('replace all', self.replace_find_all, parent),
('hide find and replace', self.hide, self),
)
for name, callback, widget in shortcuts:
self.register_shortcut_for_widget(
name=name, triggered=callback, widget=widget
)
def update_search_combo(self):
self.search_text.lineEdit().returnPressed.emit()
def update_replace_combo(self):
self.replace_text.lineEdit().returnPressed.emit()
def show(self, hide_replace=True):
"""Overrides Qt Method"""
QWidget.show(self)
self._width_adjustments()
self.visibility_changed.emit(True)
self.change_number_matches()
if self.editor is not None:
if hide_replace:
if self.replace_widgets[0].isVisible():
self.hide_replace()
else:
self.replace_text_button.setChecked(True)
# When selecting several lines, and replace box is activated the
# text won't be replaced for the selection
text = self.editor.get_selected_text()
if hide_replace or len(text.splitlines()) <= 1:
highlighted = True
# If no text is highlighted for search, use whatever word is
# under the cursor
if not text:
highlighted = False
try:
cursor = self.editor.textCursor()
cursor.select(QTextCursor.WordUnderCursor)
text = str(cursor.selectedText())
except AttributeError:
# We can't do this for all widgets, e.g. WebView's
pass
# Now that text value is sorted out, use it for the search
if text and not self.search_text.currentText() or highlighted:
self.search_text.setEditText(text)
self.search_text.lineEdit().selectAll()
self.refresh()
else:
self.search_text.lineEdit().selectAll()
self.search_text.setFocus()
def resizeEvent(self, event):
super().resizeEvent(event)
self._width_adjustments()
@Slot()
def replace_widget(self, replace_on):
"""Show and hide replace widget"""
if replace_on:
self.show_replace()
else:
self.hide_replace()
def change_replace_state(self):
"""Handle the change of the replace state widget."""
self.replace_on = not self.replace_on
self.replace_text_button.setChecked(self.replace_on)
self.replace_widget(self.replace_on)
def hide(self):
"""Overrides Qt Method"""
for widget in self.replace_widgets:
widget.hide()
QWidget.hide(self)
self.replace_text_button.setChecked(False)
self.visibility_changed.emit(False)
if self.editor is not None:
self.editor.setFocus()
self.clear_matches()
def show_replace(self):
"""Show replace widgets"""
if self.enable_replace:
self.show(hide_replace=False)
for widget in self.replace_widgets:
widget.show()
def hide_replace(self):
"""Hide replace widgets"""
for widget in self.replace_widgets:
widget.hide()
self.replace_text_button.setChecked(False)
def refresh(self):
"""Refresh widget"""
if self.isHidden():
if self.editor is not None:
self.clear_matches()
return
state = self.editor is not None
for widget in self.widgets:
widget.setEnabled(state)
if state:
self.find()
def set_editor(self, editor, refresh=True):
"""Set associated editor."""
# Note: This is necessary to test widgets/editor.py in Qt builds that
# don't have web widgets
try:
from qtpy.QtWebEngineWidgets import QWebEngineView
except ImportError:
QWebEngineView = type(None)
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
self.words_button.setVisible(not isinstance(editor, QWebEngineView))
self.re_button.setVisible(not isinstance(editor, QWebEngineView))
self.is_code_editor = isinstance(editor, CodeEditor)
# Disconnect previous connection to highlight matches
if self.editor is not None and self.is_code_editor:
self.editor.textChanged.disconnect(self.update_matches)
# Set current editor
self.editor = editor
# Keep number of matches updated if editor text has changed
if self.is_code_editor:
self.editor.textChanged.connect(self.update_matches)
if refresh:
self.refresh()
if self.isHidden() and editor is not None:
self.clear_matches()
@Slot()
def find_next(self, set_focus=True):
"""Find next occurrence"""
state = self.find(changed=False, forward=True, rehighlight=False,
multiline_replace_check=False)
if set_focus:
self.editor.setFocus()
self.search_text.add_current_text()
return state
@Slot()
def find_previous(self, set_focus=True):
"""Find previous occurrence"""
state = self.find(changed=False, forward=False, rehighlight=False,
multiline_replace_check=False)
if set_focus:
self.editor.setFocus()
return state
def text_has_been_edited(self, text):
"""
Find text has been edited (this slot won't be triggered when setting
the search pattern combo box text programmatically).
"""
self.find(changed=True, forward=True, start_highlight_timer=True)
def highlight_matches(self):
"""Highlight found results"""
if self.is_code_editor:
text = self.search_text.currentText()
case = self.case_button.isChecked()
word = self.words_button.isChecked()
regexp = self.re_button.isChecked()
self.editor.highlight_found_results(
text, word=word, regexp=regexp, case=case)
def clear_matches(self):
"""Clear all highlighted matches"""
self.matches_string = ""
self.messages_action.setVisible(False)
self.number_matches_text.hide()
if self.is_code_editor:
self.editor.clear_found_results()
def find(self, changed=True, forward=True, rehighlight=True,
start_highlight_timer=False, multiline_replace_check=True):
"""Call the find function"""
# When several lines are selected in the editor and replace box is
# activated, dynamic search is deactivated to prevent changing the
# selection. Otherwise we show matching items.
if multiline_replace_check and self.replace_widgets[0].isVisible():
sel_text = self.editor.get_selected_text()
if len(str(sel_text).splitlines()) > 1:
return None
text = self.search_text.currentText()
if len(text) == 0:
if not self.is_code_editor:
# Clears the selection for WebEngine
self.editor.find_text('')
self.change_number_matches()
self.clear_matches()
return None
else:
case = self.case_button.isChecked()
word = self.words_button.isChecked()
regexp = self.re_button.isChecked()
found = self.editor.find_text(text, changed, forward, case=case,
word=word, regexp=regexp)
error_msg = False
if not found and regexp:
error_msg = regexp_error_msg(text)
if error_msg:
self.show_error(error_msg)
# No need to continue after this point if we detected an error in
# the passed regexp.
if error_msg:
return
if self.is_code_editor and found:
cursor = QTextCursor(self.editor.textCursor())
TextHelper(self.editor).unfold_if_colapsed(cursor)
if rehighlight or not self.editor.found_results:
self.highlight_timer.stop()
if start_highlight_timer:
self.highlight_timer.start()
else:
self.highlight_matches()
else:
self.clear_matches()
number_matches = self.editor.get_number_matches(text, case=case,
regexp=regexp,
word=word)
if hasattr(self.editor, 'get_match_number'):
match_number = self.editor.get_match_number(text, case=case,
regexp=regexp,
word=word)
else:
match_number = 0
self.change_number_matches(current_match=match_number,
total_matches=number_matches)
return found
@Slot()
def replace_find(self, focus_replace_text=False):
"""Replace and find."""
if self.editor is None:
return
replace_text = str(self.replace_text.currentText())
search_text = str(self.search_text.currentText())
re_pattern = None
case = self.case_button.isChecked()
re_flags = re.MULTILINE if case else re.IGNORECASE | re.MULTILINE
# Check regexp before proceeding
if self.re_button.isChecked():
try:
re_pattern = re.compile(search_text, flags=re_flags)
# Check if replace_text can be substituted in re_pattern
# Fixes spyder-ide/spyder#7177.
re_pattern.sub(replace_text, '')
except re.error:
# Do nothing with an invalid regexp
return
# First found
seltxt = str(self.editor.get_selected_text())
cmptxt1 = search_text if case else search_text.lower()
cmptxt2 = seltxt if case else seltxt.lower()
do_replace = True
if re_pattern is None:
has_selected = self.editor.has_selected_text()
if not has_selected or cmptxt1 != cmptxt2:
if not self.find(changed=False, forward=True,
rehighlight=False):
do_replace = False
else:
if len(re_pattern.findall(cmptxt2)) <= 0:
if not self.find(changed=False, forward=True,
rehighlight=False):
do_replace = False
cursor = None
if do_replace:
cursor = self.editor.textCursor()
cursor.beginEditBlock()
if re_pattern is None:
cursor.removeSelectedText()
cursor.insertText(replace_text)
else:
seltxt = str(cursor.selectedText())
# Note: If the selection obtained from an editor spans a line
# break, the text will contain a Unicode U+2029 paragraph
# separator character instead of a newline \n character.
# See: spyder-ide/spyder#2675
eol_char = get_eol_chars(self.editor.toPlainText())
seltxt = seltxt.replace(u'\u2029', eol_char)
cursor.removeSelectedText()
cursor.insertText(re_pattern.sub(replace_text, seltxt))
if self.find_next(set_focus=False):
found_cursor = self.editor.textCursor()
cursor.setPosition(found_cursor.selectionStart(),
QTextCursor.MoveAnchor)
cursor.setPosition(found_cursor.selectionEnd(),
QTextCursor.KeepAnchor)
if cursor is not None:
cursor.endEditBlock()
if focus_replace_text:
self.replace_text.setFocus()
else:
self.editor.setFocus()
if getattr(self.editor, 'document_did_change', False):
self.editor.document_did_change()
@Slot()
def replace_find_all(self):
"""Replace and find all matching occurrences"""
if self.editor is None:
return
replace_text = str(self.replace_text.currentText())
search_text = str(self.search_text.currentText())
case = self.case_button.isChecked()
word = self.words_button.isChecked()
re_flags = re.MULTILINE if case else re.IGNORECASE | re.MULTILINE
if self.re_button.isChecked():
pattern = search_text
else:
pattern = re.escape(search_text)
# re.sub processes backslashes so they must be escaped
# See spyder-ide/spyder#21007.
replace_text = replace_text.replace('\\', r'\\')
# Match whole words only
if word:
pattern = r'\b{pattern}\b'.format(pattern=pattern)
# Check regexp before proceeding
re_pattern = None
try:
re_pattern = re.compile(pattern, flags=re_flags)
# Check if replace_text can be substituted in re_pattern
# Fixes spyder-ide/spyder#7177.
re_pattern.sub(replace_text, '')
except re.error:
# Do nothing with an invalid regexp
return
cursor = self.editor._select_text("sof", "eof")
text = self.editor.toPlainText()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText(re_pattern.sub(replace_text, text))
cursor.endEditBlock()
self.editor.setFocus()
@Slot()
def replace_find_selection(self, focus_replace_text=False):
"""Replace and find in the current selection"""
if self.editor is not None:
replace_text = str(self.replace_text.currentText())
search_text = str(self.search_text.currentText())
case = self.case_button.isChecked()
word = self.words_button.isChecked()
re_flags = re.MULTILINE if case else re.IGNORECASE | re.MULTILINE
re_pattern = None
if self.re_button.isChecked():
pattern = search_text
else:
pattern = re.escape(search_text)
# re.sub processes backslashes so they must be escaped
replace_text = replace_text.replace('\\', r'\\')
if word: # match whole words only
pattern = r'\b{pattern}\b'.format(pattern=pattern)
# Check regexp before proceeding
try:
re_pattern = re.compile(pattern, flags=re_flags)
# Check if replace_text can be substituted in re_pattern
# Fixes spyder-ide/spyder#7177.
re_pattern.sub(replace_text, '')
except re.error:
# Do nothing with an invalid regexp
return
selected_text = str(self.editor.get_selected_text())
replacement = re_pattern.sub(replace_text, selected_text)
if replacement != selected_text:
cursor = self.editor.textCursor()
start_pos = cursor.selectionStart()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.insertText(replacement)
# Restore selection
self.editor.set_cursor_position(start_pos)
for c in range(len(replacement)):
self.editor.extend_selection_to_next('character', 'right')
cursor.endEditBlock()
if focus_replace_text:
self.replace_text.setFocus()
else:
self.editor.setFocus()
if getattr(self.editor, 'document_did_change', False):
self.editor.document_did_change()
def change_number_matches(self, current_match=0, total_matches=0):
"""Change number of match and total matches."""
if current_match and total_matches:
self.matches_string = "{} {} {}".format(current_match, _("of"),
total_matches)
self.show_matches()
elif total_matches:
self.matches_string = "{} {}".format(total_matches, _("matches"))
self.show_matches()
else:
self.number_matches_text.hide()
if self.search_text.currentText():
self.show_no_matches()
def update_matches(self):
"""Update total number of matches if text has changed in the editor."""
if self.isVisible():
number_matches = self.editor.get_number_matches(
self.search_text.lineEdit().text(),
case=self.case_button.isChecked(),
regexp=self.re_button.isChecked(),
word=self.words_button.isChecked()
)
self.change_number_matches(total_matches=number_matches)
def show_no_matches(self):
"""Show a no matches message with an icon."""
self._show_icon_message('no_matches')
def show_matches(self):
"""Show the number of matches found in the document."""
if not self.matches_string:
return
self.number_matches_text.show()
self.messages_action.setVisible(False)
if self.hide_number_matches_text:
self.number_matches_text.setPixmap(self.number_matches_pixmap)
self.number_matches_text.setToolTip(self.matches_string)
else:
self.number_matches_text.setPixmap(QPixmap())
self.number_matches_text.setText(self.matches_string)
def show_error(self, error_msg):
"""Show a regexp error message with an icon."""
self._show_icon_message('error', extra_info=error_msg)
def _show_icon_message(self, kind, extra_info=None):
"""
Show a message to users with an icon when no matches can be found or
there's an error in the passed regexp.
Parameters
----------
kind: str
The kind of message. It can be 'no_matches' or 'error'.
extra_info:
Extra info to add to the icon's tooltip.
"""
if kind == 'no_matches':
tooltip = self.TOOLTIP['no_matches']
icon = self.no_matches_icon
else:
tooltip = self.TOOLTIP['regexp_error']
icon = self.error_icon
if extra_info:
tooltip = tooltip + ': ' + extra_info
self.messages_action.setIcon(icon)
self.messages_action.setToolTip(tooltip)
self.messages_action.setVisible(True)
def _width_adjustments(self):
"""Several adjustments according to the widget's total width."""
# The widgets list includes search_text and number_matches_text. That's
# why we substract a 2 below.
buttons_width = self.icon_size.width() * (len(self.widgets) - 2)
total_width = self.size().width()
matches_width = self.number_matches_text.size().width()
minimal_width = (
self.search_text.recommended_width + buttons_width + matches_width
)
if total_width < minimal_width:
self.search_text.setMinimumWidth(30)
self.hide_number_matches_text = True
else:
self.search_text.setMinimumWidth(int(total_width / 2))
self.hide_number_matches_text = False
# We don't call show_matches directly here to avoid flickering when the
# user hits the widget's minimal width, which changes from text to an
# icon (or vice versa) for number_matches_text.
self.show_matches_timer.start()
def _resize_replace_text(self, size, old_size):
"""
Resize replace_text combobox to match the width of the search one.
"""
self.replace_text.setMinimumWidth(size.width())
self.replace_text.setMaximumWidth(size.width())
| FindReplace |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area01.py | {
"start": 315,
"end": 1539
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_area01.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1:A1")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
def test_create_file_single_cell(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/got_ocr2/modular_got_ocr2.py | {
"start": 9456,
"end": 9911
} | class ____(SamVisionLayer):
def __init__(self, config, window_size):
super().__init__(config, window_size)
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn = GotOcr2VisionAttention(config, window_size)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = GotOcr2MLPBlock(config)
self.window_size = window_size
| GotOcr2VisionLayer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 45722,
"end": 46290
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("assignable_id", "assignee_ids", "client_mutation_id")
assignable_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="assignableId"
)
assignee_ids = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))),
graphql_name="assigneeIds",
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddAssigneesToAssignableInput |
python | apache__airflow | providers/google/tests/unit/google/common/hooks/test_base_google.py | {
"start": 31582,
"end": 36127
} | class ____:
def setup_method(self):
with mock.patch(
MODULE_NAME + ".GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=mock.PropertyMock,
return_value="PROJECT_ID",
)
@mock.patch(MODULE_NAME + ".check_output")
def test_provide_authorized_gcloud_key_path_and_keyfile_dict(self, mock_check_output, mock_default):
key_path = "/test/key-path"
self.instance.extras = {
"key_path": key_path,
"keyfile_dict": '{"foo": "bar"}',
}
with pytest.raises(
AirflowException,
match="The `keyfile_dict` and `key_path` fields are mutually exclusive. "
"Please provide only one value.",
):
with self.instance.provide_authorized_gcloud():
assert os.environ[CREDENTIALS] == key_path
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=mock.PropertyMock,
return_value="PROJECT_ID",
)
@mock.patch(MODULE_NAME + ".check_output")
def test_provide_authorized_gcloud_key_path(self, mock_check_output, mock_project_id):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
with self.instance.provide_authorized_gcloud():
assert os.environ[CREDENTIALS] == key_path
calls = [
mock.call(["gcloud", "auth", "activate-service-account", "--key-file=/test/key-path"]),
mock.call(["gcloud", "config", "set", "core/project", "PROJECT_ID"]),
]
mock_check_output.assert_has_calls(calls)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=mock.PropertyMock,
return_value="PROJECT_ID",
)
@mock.patch(MODULE_NAME + ".check_output")
@mock.patch("tempfile.NamedTemporaryFile")
def test_provide_authorized_gcloud_keyfile_dict(self, mock_file, mock_check_output, mock_project_id):
string_file = StringIO()
file_content = '{"foo": "bar"}'
file_name = "/test/mock-file"
self.instance.extras = {"keyfile_dict": file_content}
mock_file_handler = mock_file.return_value.__enter__.return_value
mock_file_handler.name = file_name
mock_file_handler.write = string_file.write
with self.instance.provide_authorized_gcloud():
assert os.environ[CREDENTIALS] == file_name
calls = [
mock.call(["gcloud", "auth", "activate-service-account", "--key-file=/test/mock-file"]),
mock.call(["gcloud", "config", "set", "core/project", "PROJECT_ID"]),
]
mock_check_output.assert_has_calls(calls)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=mock.PropertyMock,
return_value="PROJECT_ID",
)
@mock.patch(MODULE_NAME + "._cloud_sdk")
@mock.patch(MODULE_NAME + ".check_output")
@mock.patch("tempfile.NamedTemporaryFile")
def test_provide_authorized_gcloud_via_gcloud_application_default(
self, mock_file, mock_check_output, mock_cloud_sdk, mock_project_id
):
# This file always exists.
mock_cloud_sdk.get_application_default_credentials_path.return_value = __file__
file_content = json.dumps(
{
"client_id": "CLIENT_ID",
"client_secret": "CLIENT_SECRET",
"refresh_token": "REFRESH_TOKEN",
"type": "authorized_user",
}
)
with mock.patch(MODULE_NAME + ".open", mock.mock_open(read_data=file_content)):
with self.instance.provide_authorized_gcloud():
# Do nothing
pass
mock_check_output.assert_has_calls(
[
mock.call(["gcloud", "config", "set", "auth/client_id", "CLIENT_ID"]),
mock.call(["gcloud", "config", "set", "auth/client_secret", "CLIENT_SECRET"]),
mock.call(["gcloud", "auth", "activate-refresh-token", "CLIENT_ID", "REFRESH_TOKEN"]),
mock.call(["gcloud", "config", "set", "core/project", "PROJECT_ID"]),
],
any_order=False,
)
| TestProvideAuthorizedGcloud |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.