| language (string, 1 value) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python | Textualize__textual | docs/examples/guide/widgets/fizzbuzz01.py | {
"start": 110,
"end": 479
} | class ____(Static):
def on_mount(self) -> None:
table = Table("Number", "Fizz?", "Buzz?")
for n in range(1, 16):
fizz = not n % 3
buzz = not n % 5
table.add_row(
str(n),
"fizz" if fizz else "",
"buzz" if buzz else "",
)
self.update(table)
| FizzBuzz |
python | doocs__leetcode | lcof/面试题48. 最长不含重复字符的子字符串/Solution2.py | {
"start": 0,
"end": 309
} | class ____:
def lengthOfLongestSubstring(self, s: str) -> int:
vis = set()
ans = j = 0
for i, c in enumerate(s):
while c in vis:
vis.remove(s[j])
j += 1
vis.add(c)
ans = max(ans, i - j + 1)
return ans
| Solution |
python | coleifer__peewee | tests/regressions.py | {
"start": 3794,
"end": 3865
} | class ____(TestModel):
a = ForeignKeyField(DiA)
b = TextField()
| DiB |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/pooling.py | {
"start": 30487,
"end": 33142
} | class ____(Pooling3D):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
Args:
pool_size: tuple of 3 integers,
factors by which to downscale (dim1, dim2, dim3).
`(2, 2, 2)` will halve the size of the 3D input in each dimension.
strides: tuple of 3 integers, or None. Strides values.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
input_channels = 3
inputs = tf.keras.Input(shape=(depth, height, width, input_channels))
layer = tf.keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(self,
pool_size=(2, 2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(AveragePooling3D, self).__init__(
nn.avg_pool3d,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
| AveragePooling3D |
python | python__mypy | mypyc/ir/ops.py | {
"start": 41478,
"end": 42294
} | class ____(RegisterOp):
"""Load a low-level global variable/pointer.
Note that can't be used to directly load Python module-level
global variable, since they are stored in a globals dictionary
and accessed using dictionary operations.
"""
error_kind = ERR_NEVER
is_borrowed = True
def __init__(self, type: RType, identifier: str, line: int = -1, ann: object = None) -> None:
super().__init__(line)
self.identifier = identifier
self.type = type
self.ann = ann # An object to pretty print with the load
def sources(self) -> list[Value]:
return []
def set_sources(self, new: list[Value]) -> None:
assert not new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_load_global(self)
@final
| LoadGlobal |
python | doocs__leetcode | solution/3300-3399/3380.Maximum Area Rectangle With Point Constraints I/Solution.py | {
"start": 0,
"end": 783
} | class ____:
def maxRectangleArea(self, points: List[List[int]]) -> int:
def check(x1: int, y1: int, x2: int, y2: int) -> bool:
cnt = 0
for x, y in points:
if x < x1 or x > x2 or y < y1 or y > y2:
continue
if (x == x1 or x == x2) and (y == y1 or y == y2):
cnt += 1
continue
return False
return cnt == 4
ans = -1
for i, (x1, y1) in enumerate(points):
for x2, y2 in points[:i]:
x3, y3 = min(x1, x2), min(y1, y2)
x4, y4 = max(x1, x2), max(y1, y2)
if check(x3, y3, x4, y4):
ans = max(ans, (x4 - x3) * (y4 - y3))
return ans
| Solution |
python | huggingface__transformers | tests/models/plbart/test_modeling_plbart.py | {
"start": 16856,
"end": 18302
} | class ____(AbstractSeq2SeqIntegrationTest):
checkpoint_name = "uclanlp/plbart-base"
src_text = ["Is 0 the first Fibonacci number ?", "Find the sum of all prime numbers ."]
tgt_text = ["0 the first Fibonacci number?", "the sum of all prime numbers.......... the the"]
def test_base_generate(self):
inputs = self.tokenizer([self.src_text[0]], return_tensors="pt").to(torch_device)
src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
translated_tokens = self.model.generate(
input_ids=inputs["input_ids"].to(torch_device),
decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan],
)
decoded = self.tokenizer.decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
def test_fill_mask(self):
inputs = self.tokenizer(["Is 0 the <mask> Fibonacci <mask> ?"], return_tensors="pt").to(torch_device)
src_lan = self.tokenizer._convert_lang_code_special_format("en_XX")
outputs = self.model.generate(
inputs["input_ids"], decoder_start_token_id=self.tokenizer.lang_code_to_id[src_lan], num_beams=1
)
prediction: str = self.tokenizer.decode(outputs, clean_up_tokenization_spaces=True, skip_special_tokens=True)[
0
]
self.assertEqual(prediction, "0 0 the 0 the 0 the 0 the 0 the 0 the 0 the 0 the")
| PLBartBaseIntegrationTest |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_status_index.py | {
"start": 2197,
"end": 2377
} | class ____(TypedDict, total=False):
start: datetime
end: datetime
environment: list[str]
projectSlug: list[str]
release: list[str]
| ReleaseThresholdStatusIndexData |
python | pypa__setuptools | setuptools/_static.py | {
"start": 3984,
"end": 4855
} | class ____(packaging.specifiers.SpecifierSet, Static):
"""Not exactly a built-in type but useful for ``requires-python``"""
T = TypeVar("T")
def noop(value: T) -> T:
"""
>>> noop(42)
42
"""
return value
_CONVERSIONS = {str: Str, tuple: Tuple, list: List, dict: Dict}
def attempt_conversion(value: T) -> T:
"""
>>> is_static(attempt_conversion("hello"))
True
>>> is_static(object())
False
"""
return _CONVERSIONS.get(type(value), noop)(value) # type: ignore[call-overload]
def is_static(value: object) -> bool:
"""
>>> is_static(a := Dict({'a': 1}))
True
>>> is_static(dict(a))
False
>>> is_static(b := List([1, 2, 3]))
True
>>> is_static(list(b))
False
"""
return isinstance(value, Static) and not value._mutated_
EMPTY_LIST = List()
EMPTY_DICT = Dict()
| SpecifierSet |
python | coleifer__peewee | peewee.py | {
"start": 26489,
"end": 29688
} | class ____(_HashableSource, BaseTable):
def __init__(self, name, columns=None, primary_key=None, schema=None,
alias=None, _model=None, _database=None):
self.__name__ = name
self._columns = columns
self._primary_key = primary_key
self._schema = schema
self._path = (schema, name) if schema else (name,)
self._model = _model
self._database = _database
super(Table, self).__init__(alias=alias)
# Allow tables to restrict what columns are available.
if columns is not None:
self.c = _ExplicitColumn()
for column in columns:
setattr(self, column, Column(self, column))
if primary_key:
col_src = self if self._columns else self.c
self.primary_key = getattr(col_src, primary_key)
else:
self.primary_key = None
def clone(self):
# Ensure a deep copy of the column instances.
return Table(
self.__name__,
columns=self._columns,
primary_key=self._primary_key,
schema=self._schema,
alias=self._alias,
_model=self._model,
_database=self._database)
def bind(self, database=None):
self._database = database
return self
def bind_ctx(self, database=None):
return _BoundTableContext(self, database)
def _get_hash(self):
return hash((self.__class__, self._path, self._alias, self._model))
@__bind_database__
def select(self, *columns):
if not columns and self._columns:
columns = [Column(self, column) for column in self._columns]
return Select((self,), columns)
@__bind_database__
def insert(self, insert=None, columns=None, **kwargs):
if kwargs:
insert = {} if insert is None else insert
src = self if self._columns else self.c
for key, value in kwargs.items():
insert[getattr(src, key)] = value
return Insert(self, insert=insert, columns=columns)
@__bind_database__
def replace(self, insert=None, columns=None, **kwargs):
return (self
.insert(insert=insert, columns=columns)
.on_conflict('REPLACE'))
@__bind_database__
def update(self, update=None, **kwargs):
if kwargs:
update = {} if update is None else update
for key, value in kwargs.items():
src = self if self._columns else self.c
update[getattr(src, key)] = value
return Update(self, update=update)
@__bind_database__
def delete(self):
return Delete(self)
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
# Return the quoted table name.
return ctx.sql(Entity(*self._path))
if self._alias:
ctx.alias_manager[self] = self._alias
if ctx.scope == SCOPE_SOURCE:
# Define the table and its alias.
return self.apply_alias(ctx.sql(Entity(*self._path)))
else:
# Refer to the table using the alias.
return self.apply_column(ctx)
| Table |
python | astropy__astropy | astropy/cosmology/_src/tests/test_utils.py | {
"start": 1278,
"end": 2388
} | class ____:
@pytest.mark.parametrize(
"z, expect",
list(
zip(
valid_zs,
[0, 1, 1100, np.float64(3300), 2.0, 3.0, z_arr, z_arr, z_arr, z_arr],
)
),
)
def test_valid(self, z, expect):
"""Test :func:`astropy.cosmology._src.utils.aszarr`."""
got = aszarr(z)
assert np.array_equal(got, expect)
@pytest.mark.parametrize("z, exc", invalid_zs)
def test_invalid(self, z, exc):
"""Test :func:`astropy.cosmology._src.utils.aszarr`."""
with pytest.raises(exc):
aszarr(z)
@pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas")
def test_pandas(self):
import pandas as pd
x = pd.Series([1, 2, 3, 4, 5])
# Demonstrate Pandas doesn't work with units
assert not isinstance(x * u.km, u.Quantity)
# Test aszarr works with Pandas
assert isinstance(aszarr(x), np.ndarray)
np.testing.assert_array_equal(aszarr(x), x.values)
# -------------------------------------------------------------------
| Test_aszarr |
python | django__django | tests/proxy_models/models.py | {
"start": 2107,
"end": 2226
} | class ____(MyPersonProxy):
status = models.CharField(max_length=80)
objects = models.Manager()
| LowerStatusPerson |
python | huggingface__transformers | tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py | {
"start": 5299,
"end": 10819
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(DeepseekVLHybridModel, DeepseekVLHybridForConditionalGeneration) if is_torch_available() else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DeepseekVLHybridModel,
"image-text-to-text": DeepseekVLHybridForConditionalGeneration,
"any-to-any": DeepseekVLHybridForConditionalGeneration,
}
if is_torch_available()
else {}
)
_is_composite = True
model_split_percents = [0.5, 0.85, 0.9] # it tries to offload everything with the default value
def setUp(self):
self.model_tester = DeepseekVLHybridModelTester(self)
self.config_tester = ConfigTester(self, config_class=DeepseekVLHybridConfig, has_text_modality=False)
# overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["high_res_pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
# overwrite inputs_embeds tests because we need to delete "pixel values" for VLMs.
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["high_res_pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
def test_sdpa_can_dispatch_composite_models(self):
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Load the model with SDPA
model_sdpa = model_class.from_pretrained(
tmpdirname,
attn_implementation="sdpa",
)
model_sdpa = model_sdpa.eval().to(torch_device)
# Load model with eager attention
model_eager = model_class.from_pretrained(
tmpdirname,
attn_implementation="eager",
)
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.config._attn_implementation == "eager")
if (
hasattr(model_sdpa, "vision_model")
and hasattr(model_sdpa, "high_res_vision_model")
and hasattr(model_sdpa, "language_model")
):
self.assertTrue(model_sdpa.language_model.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model_sdpa.high_res_vision_model.config._attn_implementation == "sdpa")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
self.assertTrue(model_eager.high_res_vision_model.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if (
any(re.finditer(r"Attention(?!Pool)", class_name))
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "sdpa"
):
self.assertTrue(submodule.config._attn_implementation == "eager")
for name, submodule in model_sdpa.named_modules():
class_name = submodule.__class__.__name__
if (
any(re.finditer(r"Attention(?!Pool)", class_name))
and getattr(submodule, "config", None)
and submodule.config._attn_implementation == "eager"
):
self.assertTrue(submodule.config._attn_implementation == "sdpa")
@require_torch_accelerator
@slow
def test_sdpa_can_dispatch_on_flash(self):
self.skipTest(
"deepseek_vl_hybrid uses SAM, which requires an attention_mask input for relative positional embeddings"
)
@require_torch
@require_torch_accelerator
@slow
| DeepseekVLHybridModelTest |
python | PyCQA__pylint | tests/functional/r/regression/regression_4723.py | {
"start": 191,
"end": 271
} | class ____:
@contextlib.contextmanager
def get(self):
yield self
| A |
python | Unity-Technologies__ml-agents | ml-agents-trainer-plugin/mlagents_trainer_plugin/a2c/a2c_optimizer.py | {
"start": 1285,
"end": 7052
} | class ____(TorchOptimizer):
def __init__(self, policy: TorchPolicy, trainer_settings: TrainerSettings):
"""
Takes a Policy and a Dict of trainer parameters and creates an Optimizer around the policy.
The A2C optimizer has a value estimator and a loss function.
:param policy: A TorchPolicy object that will be updated by this A2C Optimizer.
:param trainer_params: Trainer parameters dictionary that specifies the
properties of the trainer.
"""
# Create the graph here to give more granular control of the TF graph to the Optimizer.
super().__init__(policy, trainer_settings)
self.hyperparameters: A2CSettings = cast(
A2CSettings, trainer_settings.hyperparameters
)
params = list(self.policy.actor.parameters())
if self.hyperparameters.shared_critic:
self._critic = policy.actor
else:
self._critic = ValueNetwork(
list(self.reward_signals.keys()),
policy.behavior_spec.observation_specs,
network_settings=trainer_settings.network_settings,
)
self._critic.to(default_device())
params += list(self._critic.parameters())
self.decay_learning_rate = ModelUtils.DecayedValue(
self.hyperparameters.learning_rate_schedule,
self.hyperparameters.learning_rate,
1e-10,
self.trainer_settings.max_steps,
)
self.decay_beta = ModelUtils.DecayedValue(
self.hyperparameters.beta_schedule,
self.hyperparameters.beta,
1e-10,
self.trainer_settings.max_steps,
)
self.optimizer = torch.optim.Adam(
params, lr=self.trainer_settings.hyperparameters.learning_rate
)
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
}
self.stream_names = list(self.reward_signals.keys())
@property
def critic(self):
return self._critic
@timed
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:
"""
Performs update on model.
:param batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
# Get decayed parameters
decay_lr = self.decay_learning_rate.get_value(self.policy.get_current_step())
decay_bet = self.decay_beta.get_value(self.policy.get_current_step())
returns = {}
for name in self.reward_signals:
returns[name] = ModelUtils.list_to_tensor(
batch[RewardSignalUtil.returns_key(name)]
)
n_obs = len(self.policy.behavior_spec.observation_specs)
current_obs = ObsUtil.from_buffer(batch, n_obs)
# Convert to tensors
current_obs = [ModelUtils.list_to_tensor(obs) for obs in current_obs]
act_masks = ModelUtils.list_to_tensor(batch[BufferKey.ACTION_MASK])
actions = AgentAction.from_buffer(batch)
memories = [
ModelUtils.list_to_tensor(batch[BufferKey.MEMORY][i])
for i in range(0, len(batch[BufferKey.MEMORY]), self.policy.sequence_length)
]
if len(memories) > 0:
memories = torch.stack(memories).unsqueeze(0)
# Get value memories
value_memories = [
ModelUtils.list_to_tensor(batch[BufferKey.CRITIC_MEMORY][i])
for i in range(
0, len(batch[BufferKey.CRITIC_MEMORY]), self.policy.sequence_length
)
]
if len(value_memories) > 0:
value_memories = torch.stack(value_memories).unsqueeze(0)
run_out = self.policy.actor.get_stats(
current_obs,
masks=act_masks,
actions=actions,
memories=memories,
sequence_length=self.policy.sequence_length,
)
log_probs = run_out["log_probs"]
entropy = run_out["entropy"]
values, _ = self.critic.critic_pass(
current_obs,
memories=value_memories,
sequence_length=self.policy.sequence_length,
)
log_probs = log_probs.flatten()
value_loss_per_head = []
for name, head in values.items():
returns_tensor = returns[name]
be = (returns_tensor - head) ** 2
value_loss_per_head.append(be)
value_loss = torch.mean(torch.stack(value_loss_per_head))
advantages = ModelUtils.list_to_tensor(batch[BufferKey.ADVANTAGES])
policy_loss = -1 * torch.mean(torch.sum(log_probs, dim=1) * advantages)
loss = policy_loss + 0.5 * value_loss - decay_bet * torch.mean(entropy)
# Set optimizer learning rate
ModelUtils.update_learning_rate(self.optimizer, decay_lr)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
update_stats = {
# NOTE: abs() is not technically correct, but matches the behavior in TensorFlow.
# TODO: After PyTorch is default, change to something more correct.
"Losses/Policy Loss": torch.abs(policy_loss).item(),
"Losses/Value Loss": value_loss.item(),
"Policy/Learning Rate": decay_lr,
"Policy/Beta": decay_bet,
}
return update_stats
def get_modules(self):
modules = {
"Optimizer:value_optimizer": self.optimizer,
"Optimizer:critic": self._critic,
}
for reward_provider in self.reward_signals.values():
modules.update(reward_provider.get_modules())
return modules
| A2COptimizer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/job.py | {
"start": 805,
"end": 1111
} | class ____(Enum):
"""
Possible error codes that can be returned by BulkOperationUserError.
https://shopify.dev/docs/api/admin-graphql/latest/enums/BulkOperationUserErrorCode
"""
INVALID = "INVALID"
OPERATION_IN_PROGRESS = "OPERATION_IN_PROGRESS"
@dataclass
| BulkOperationUserErrorCode |
python | neetcode-gh__leetcode | python/2971-find-polygon-with-the-largest-perimeter.py | {
"start": 327,
"end": 622
} | class ____:
def largestPerimeter(self, nums: List[int]) -> int:
curSum = sum(nums)
heapq._heapify_max(nums)
while nums and curSum <= nums[0] * 2:
curSum -= heapq._heappop_max(nums)
return curSum if len(nums) > 2 else -1
| Solution |
python | mlflow__mlflow | mlflow/gateway/config.py | {
"start": 6781,
"end": 6877
} | class ____(AWSBaseConfig):
aws_role_arn: str
session_length_seconds: int = 15 * 60
| AWSRole |
python | getsentry__sentry | src/sentry/integrations/slack/analytics.py | {
"start": 646,
"end": 816
} | class ____(analytics.Event):
provider: str
actor_id: int
actor_type: str
@analytics.eventclass("integrations.slack.chart_unfurl")
| SlackIntegrationIdentityLinked |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 607609,
"end": 607952
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ReviewDismissalAllowance", graphql_name="node")
| ReviewDismissalAllowanceEdge |
python | doocs__leetcode | solution/0800-0899/0892.Surface Area of 3D Shapes/Solution.py | {
"start": 0,
"end": 414
} | class ____:
def surfaceArea(self, grid: List[List[int]]) -> int:
ans = 0
for i, row in enumerate(grid):
for j, v in enumerate(row):
if v:
ans += 2 + v * 4
if i:
ans -= min(v, grid[i - 1][j]) * 2
if j:
ans -= min(v, grid[i][j - 1]) * 2
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/framework/immutable_dict.py | {
"start": 917,
"end": 1563
} | class ____(collections.abc.Mapping):
"""Immutable `Mapping`."""
# Note: keys, items, values, get, __eq__, and __ne__ are implemented by
# the `Mapping` base class.
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return f'ImmutableDict({self._dict})'
# This suppresses a warning that tf.nest would otherwise generate.
__supported_by_tf_nest__ = True
| ImmutableDict |
python | Pylons__pyramid | src/pyramid/exceptions.py | {
"start": 3513,
"end": 3844
} | class ____(ConfigurationError):
"""An error occurred during execution of a configuration action"""
def __init__(self, etype, evalue, info):
self.etype, self.evalue, self.info = etype, evalue, info
def __str__(self):
return f"{self.etype}: {self.evalue}\n in:\n {self.info}"
| ConfigurationExecutionError |
python | doocs__leetcode | lcp/LCP 33. 蓄水/Solution.py | {
"start": 0,
"end": 332
} | class ____:
def storeWater(self, bucket: List[int], vat: List[int]) -> int:
mx = max(vat)
if mx == 0:
return 0
ans = inf
for x in range(1, mx + 1):
y = sum(max(0, (v + x - 1) // x - b) for v, b in zip(vat, bucket))
ans = min(ans, x + y)
return ans
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/numeric/test_indexing.py | {
"start": 15986,
"end": 17373
} | class ____:
@pytest.mark.parametrize(
"index",
[
Index(np.arange(5, dtype="float64")),
Index(range(0, 20, 2), dtype=np.int64),
Index(np.arange(5, dtype="uint64")),
],
)
def test_where(self, listlike_box, index):
cond = [True] * len(index)
expected = index
result = index.where(listlike_box(cond))
cond = [False] + [True] * (len(index) - 1)
expected = Index([index._na_value] + index[1:].tolist(), dtype=np.float64)
result = index.where(listlike_box(cond))
tm.assert_index_equal(result, expected)
def test_where_uint64(self):
idx = Index([0, 6, 2], dtype=np.uint64)
mask = np.array([False, True, False])
other = np.array([1], dtype=np.int64)
expected = Index([1, 6, 1], dtype=np.uint64)
result = idx.where(mask, other)
tm.assert_index_equal(result, expected)
result = idx.putmask(~mask, other)
tm.assert_index_equal(result, expected)
def test_where_infers_type_instead_of_trying_to_convert_string_to_float(self):
# GH 32413
index = Index([1, np.nan])
cond = index.notna()
other = Index(["a", "b"], dtype="string")
expected = Index([1.0, "b"])
result = index.where(cond, other)
tm.assert_index_equal(result, expected)
| TestWhere |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolModule2.py | {
"start": 1630,
"end": 1809
} | class ____(Protocol[_T1]):
def func_1(self, a: int, b: _T1) -> _T1: ...
def func4(x: P5[_T1]) -> _T1: ...
v5 = func4(protocolModule1)
reveal_type(v5, expected_text="str")
| P5 |
python | plotly__plotly.py | plotly/graph_objs/densitymapbox/_hoverlabel.py | {
"start": 233,
"end": 11283
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymapbox"
_path_str = "densitymapbox.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.densitymapbox.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.densitymapbox.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymapbox.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymapbox.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | PrefectHQ__prefect | src/prefect/client/schemas/events.py | {
"start": 290,
"end": 1541
} | class ____(PrefectBaseModel):
"""a single page of events returned from the API"""
events: list[ReceivedEvent] = Field(description="the events matching the query")
total: int = Field(description="the total number of matching events")
next_page: AnyHttpUrl | None = Field(
description="the URL for the next page of results, if there are more"
)
async def get_next_page(self, client: "PrefectClient") -> "EventPage | None":
"""
fetch the next page of events.
args:
client: the PrefectClient instance to use for fetching
returns:
the next EventPage, or None if there are no more pages
"""
if not self.next_page:
return None
return await client.read_events_page(self.next_page)
def get_next_page_sync(self, client: "SyncPrefectClient") -> "EventPage | None":
"""
fetch the next page of events (sync version).
args:
client: the SyncPrefectClient instance to use for fetching
returns:
the next EventPage, or None if there are no more pages
"""
if not self.next_page:
return None
return client.read_events_page(self.next_page)
| EventPage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 558130,
"end": 558629
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteProjectV2Field"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "project_v2_field")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
project_v2_field = sgqlc.types.Field("ProjectV2FieldConfiguration", graphql_name="projectV2Field")
"""The deleted field."""
| DeleteProjectV2FieldPayload |
python | readthedocs__readthedocs.org | readthedocs/api/v2/serializers.py | {
"start": 1389,
"end": 3162
} | class ____(ProjectSerializer):
"""
Project serializer for admin only access.
Includes special internal fields that don't need to be exposed through the
general API, mostly for fields used in the build process
"""
features = serializers.SlugRelatedField(
many=True,
read_only=True,
slug_field="feature_id",
)
environment_variables = serializers.SerializerMethodField()
skip = serializers.SerializerMethodField()
def get_environment_variables(self, obj):
"""Get all environment variables, including public ones."""
return {
variable.name: {
"value": variable.value,
"public": variable.public,
}
for variable in obj.environmentvariable_set.all()
}
def get_skip(self, obj):
"""
Override ``Project.skip`` to consider more cases whether skip a project.
We rely on ``.is_active`` manager's method here that encapsulates all
these possible cases.
"""
return not Project.objects.is_active(obj)
class Meta(ProjectSerializer.Meta):
fields = ProjectSerializer.Meta.fields + (
"analytics_code",
"analytics_disabled",
"cdn_enabled",
"container_image",
"container_mem_limit",
"container_time_limit",
"skip",
"features",
"has_valid_clone",
"has_valid_webhook",
"show_advertising",
"environment_variables",
"max_concurrent_builds",
"readthedocs_yaml_path",
"clone_token",
"has_ssh_key_with_write_access",
"git_checkout_command",
)
| ProjectAdminSerializer |
python | qdrant__qdrant-client | qdrant_client/http/exceptions.py | {
"start": 1497,
"end": 1616
} | class ____(ApiException):
def __init__(self, source: Exception):
self.source = source
| ResponseHandlingException |
python | pytorch__pytorch | tools/stats/upload_utilization_stats/upload_utilization_stats.py | {
"start": 801,
"end": 3734
} | class ____:
"""
generates test segment from utilization records, currently it only generate segments on python commands level
segment_delta_threshold is the threshold to determine if a segment is continuous or not, default is 60 seconds.
"""
def generate(
self, records: list[UtilizationRecord], segment_delta_threshold: int = 60
) -> list[OssCiSegmentV1]:
if len(records) == 0:
return []
cmd_col_name = "cmd"
time_col_name = "time"
# flatten time series with detected cmds
df = pd.DataFrame(
[
{time_col_name: record.timestamp, cmd_col_name: process}
for record in records
for process in (record.cmd_names or [])
]
)
df[time_col_name] = pd.to_datetime(df[time_col_name], unit="s", utc=True)
# get unique cmd names
# pyrefly: ignore [bad-argument-type]
unique_cmds_df = pd.DataFrame(df[cmd_col_name].unique(), columns=[cmd_col_name])
# get all detected python cmds
cmd_list = unique_cmds_df[
unique_cmds_df[cmd_col_name].str.startswith("python")
][cmd_col_name].tolist()
# find segments by screening continuoues time series data
segments: list[OssCiSegmentV1] = []
for value in cmd_list:
subset = df[df[cmd_col_name] == value].copy()
continuous_segments = self._find_continuous_windows(
segment_delta_threshold, time_col_name, subset
)
for row in continuous_segments:
segment = OssCiSegmentV1(
level=CMD_PYTHON_LEVEL,
name=value,
start_at=int(row["start_time"].timestamp()),
end_at=int(row["end_time"].timestamp()),
extra_info={},
)
segments.append(segment)
print(
f"[Db Segments] detected pytest cmd: {len(cmd_list)}, generated segments: {len(segments)}"
)
return segments
def _find_continuous_windows(
self,
threshold: int,
time_column_name: str,
df: Any, # the lintrunner keep complaining about the type of df, but it's not a problem
) -> list[dict[str, Any]]:
time_threshold = pd.Timedelta(seconds=threshold)
df = df.sort_values(by=time_column_name).reset_index(drop=True)
df["time_diff"] = df[time_column_name].diff()
df["segment"] = (df["time_diff"] > time_threshold).cumsum()
segments = (
df.groupby("segment")
.agg(
start_time=(time_column_name, "first"),
end_time=(time_column_name, "last"),
)
.reset_index(drop=True)
)
return segments[["start_time", "end_time"]].to_dict(orient="records") # type: ignore[no-any-return]
| SegmentGenerator |
python | apache__airflow | providers/google/tests/unit/google/cloud/log/test_gcs_task_handler.py | {
"start": 1324,
"end": 13340
} | class ____:
@pytest.fixture(autouse=True)
def task_instance(self, create_task_instance, session):
self.ti = ti = create_task_instance(
dag_id="dag_for_testing_gcs_task_handler",
task_id="task_for_testing_gcs_task_handler",
logical_date=datetime(2020, 1, 1),
state=TaskInstanceState.RUNNING,
)
ti.try_number = 1
ti.raw = False
session.add(ti)
session.commit()
yield
clear_db_runs()
clear_db_dags()
@pytest.fixture(autouse=True)
def local_log_location(self, tmp_path_factory):
return str(tmp_path_factory.mktemp("local-gcs-log-location"))
@pytest.fixture(autouse=True)
def gcs_task_handler(self, create_log_template, local_log_location):
create_log_template("{try_number}.log")
self.gcs_task_handler = GCSTaskHandler(
base_log_folder=local_log_location,
gcs_log_folder="gs://bucket/remote/log/location",
)
return self.gcs_task_handler
@mock.patch("airflow.providers.google.cloud.log.gcs_task_handler.GCSHook")
@mock.patch("google.cloud.storage.Client")
@mock.patch("airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id")
@pytest.mark.parametrize(
"conn_id",
[pytest.param("", id="no-conn"), pytest.param("my_gcs_conn", id="with-conn")],
)
def test_client_conn_id_behavior(self, mock_get_cred, mock_client, mock_hook, conn_id):
"""When remote log conn id configured, hook will be used"""
mock_hook.return_value.get_credentials_and_project_id.return_value = (
"test_cred",
"test_proj",
)
mock_get_cred.return_value = ("test_cred", "test_proj")
with conf_vars({("logging", "remote_log_conn_id"): conn_id}):
return_value = self.gcs_task_handler.io.client
if conn_id:
mock_hook.assert_called_once_with(gcp_conn_id="my_gcs_conn")
mock_get_cred.assert_not_called()
else:
mock_hook.assert_not_called()
mock_get_cred.assert_called()
mock_client.assert_called_once_with(
client_info=mock.ANY, credentials="test_cred", project="test_proj"
)
assert mock_client.return_value == return_value
@conf_vars({("logging", "remote_log_conn_id"): "gcs_default"})
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_should_read_logs_from_remote(
self, mock_blob, mock_client, mock_creds, session, sdk_connection_not_found
):
mock_obj = MagicMock()
mock_obj.name = "remote/log/location/1.log"
mock_client.return_value.list_blobs.return_value = [mock_obj]
mock_blob.from_string.return_value.download_as_bytes.return_value = b"CONTENT"
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
session.add(ti)
session.commit()
logs, metadata = self.gcs_task_handler._read(ti, self.ti.try_number)
expected_gs_uri = f"gs://bucket/{mock_obj.name}"
mock_blob.from_string.assert_called_once_with(expected_gs_uri, mock_client.return_value)
if AIRFLOW_V_3_0_PLUS:
logs = list(logs)
assert logs[0].event == "::group::Log message source details"
assert logs[0].sources == [expected_gs_uri]
assert logs[1].event == "::endgroup::"
assert logs[2].event == "CONTENT"
assert metadata == {"end_of_log": True, "log_pos": 1}
else:
assert f"*** Found remote logs:\n*** * {expected_gs_uri}\n" in logs
assert logs.endswith("CONTENT")
assert metadata == {"end_of_log": True, "log_pos": 7}
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_should_read_from_local_on_logs_read_error(self, mock_blob, mock_client, mock_creds):
mock_obj = MagicMock()
mock_obj.name = "remote/log/location/1.log"
mock_client.return_value.list_blobs.return_value = [mock_obj]
mock_blob.from_string.return_value.download_as_bytes.side_effect = Exception("Failed to connect")
self.gcs_task_handler.set_context(self.ti)
ti = copy.copy(self.ti)
ti.state = TaskInstanceState.SUCCESS
log, metadata = self.gcs_task_handler._read(ti, self.ti.try_number)
expected_gs_uri = f"gs://bucket/{mock_obj.name}"
if AIRFLOW_V_3_0_PLUS:
log = list(log)
assert log[0].event == "::group::Log message source details"
assert log[0].sources == [
expected_gs_uri,
f"{self.gcs_task_handler.local_base}/1.log",
]
assert log[1].event == "::endgroup::"
assert metadata == {"end_of_log": True, "log_pos": 0}
else:
assert (
"*** Found remote logs:\n"
"*** * gs://bucket/remote/log/location/1.log\n"
"*** Unable to read remote log Failed to connect\n"
"*** Found local files:\n"
f"*** * {self.gcs_task_handler.local_base}/1.log\n"
) in log
assert metadata == {"end_of_log": True, "log_pos": 0}
mock_blob.from_string.assert_called_once_with(expected_gs_uri, mock_client.return_value)
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_write_to_remote_on_close(self, mock_blob, mock_client, mock_creds):
mock_blob.from_string.return_value.download_as_bytes.return_value = b"CONTENT"
self.gcs_task_handler.set_context(self.ti)
self.gcs_task_handler.emit(
logging.LogRecord(
name="NAME",
level="DEBUG",
pathname=None,
lineno=None,
msg="MESSAGE",
args=None,
exc_info=None,
)
)
self.gcs_task_handler.close()
mock_blob.assert_has_calls(
[
mock.call.from_string("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call.from_string().download_as_bytes(),
mock.call.from_string("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call.from_string().upload_from_string("CONTENT\nMESSAGE\n", content_type="text/plain"),
],
any_order=False,
)
mock_blob.from_string.return_value.upload_from_string(data="CONTENT\nMESSAGE\n")
assert self.gcs_task_handler.closed is True
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_failed_write_to_remote_on_close(self, mock_blob, mock_client, mock_creds, caplog):
caplog.at_level(logging.ERROR, logger=self.gcs_task_handler.log.name)
mock_blob.from_string.return_value.upload_from_string.side_effect = Exception("Failed to connect")
mock_blob.from_string.return_value.download_as_bytes.return_value = b"Old log"
self.gcs_task_handler.set_context(self.ti)
self.gcs_task_handler.emit(
logging.LogRecord(
name="NAME",
level="DEBUG",
pathname=None,
lineno=None,
msg="MESSAGE",
args=None,
exc_info=None,
)
)
self.gcs_task_handler.close()
assert caplog.record_tuples == [
(
"airflow.providers.google.cloud.log.gcs_task_handler.GCSRemoteLogIO",
logging.ERROR,
"Could not write logs to gs://bucket/remote/log/location/1.log: Failed to connect",
),
]
mock_blob.assert_has_calls(
[
mock.call.from_string("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call.from_string().download_as_bytes(),
mock.call.from_string("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call.from_string().upload_from_string("Old log\nMESSAGE\n", content_type="text/plain"),
],
any_order=False,
)
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_write_to_remote_on_close_failed_read_old_logs(self, mock_blob, mock_client, mock_creds):
mock_blob.from_string.return_value.download_as_bytes.side_effect = Exception("Fail to download")
self.gcs_task_handler.set_context(self.ti)
self.gcs_task_handler.emit(
logging.LogRecord(
name="NAME",
level="DEBUG",
pathname=None,
lineno=None,
msg="MESSAGE",
args=None,
exc_info=None,
)
)
self.gcs_task_handler.close()
mock_blob.from_string.assert_has_calls(
[
mock.call("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call().download_as_bytes(),
mock.call("gs://bucket/remote/log/location/1.log", mock_client.return_value),
mock.call().upload_from_string(
"MESSAGE\n",
content_type="text/plain",
),
],
any_order=False,
)
@pytest.mark.parametrize(
("delete_local_copy", "expected_existence_of_local_copy"),
[(True, False), (False, True)],
)
@mock.patch(
"airflow.providers.google.cloud.log.gcs_task_handler.get_credentials_and_project_id",
return_value=("TEST_CREDENTIALS", "TEST_PROJECT_ID"),
)
@mock.patch("google.cloud.storage.Client")
@mock.patch("google.cloud.storage.Blob")
def test_close_with_delete_local_copy_conf(
self,
mock_blob,
mock_client,
mock_creds,
local_log_location,
delete_local_copy,
expected_existence_of_local_copy,
):
mock_blob.from_string.return_value.download_as_bytes.return_value = b"CONTENT"
with conf_vars({("logging", "delete_local_logs"): str(delete_local_copy)}):
handler = GCSTaskHandler(
base_log_folder=local_log_location,
gcs_log_folder="gs://bucket/remote/log/location",
)
handler.log.info("test")
handler.set_context(self.ti)
assert handler.upload_on_close
handler.close()
assert os.path.exists(handler.handler.baseFilename) == expected_existence_of_local_copy
@pytest.fixture(autouse=True)
def test_filename_template_for_backward_compatibility(self, local_log_location):
# filename_template arg support for running the latest provider on airflow 2
GCSTaskHandler(
base_log_folder=local_log_location,
gcs_log_folder="gs://bucket/remote/log/location",
filename_template=None,
)
| TestGCSTaskHandler |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image_anchor02.py | {
"start": 315,
"end": 1246
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image_anchor02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"positioning": 2})
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"positioning": 2})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/vars_plugin_staging.py | {
"start": 196,
"end": 896
} | class ____(object):
DOCUMENTATION = r"""
options:
stage:
description:
- Control when this vars plugin may be executed.
- Setting this option to V(all) will run the vars plugin after importing inventory and whenever it is demanded by a task.
- Setting this option to V(task) will only run the vars plugin whenever it is demanded by a task.
- Setting this option to V(inventory) will only run the vars plugin after parsing inventory.
- If this option is omitted, the global C(RUN_VARS_PLUGINS) configuration is used to determine when to execute the vars plugin.
choices: ['all', 'task', 'inventory']
version_added: "2.10"
type: str
"""
| ModuleDocFragment |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 10375,
"end": 10548
} | class ____(PydanticValueError):
def __init__(self, *, limit_value: Union[int, float, Decimal]) -> None:
super().__init__(limit_value=limit_value)
| _NumberBoundError |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 169722,
"end": 169989
} | class ____(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
| SendmsgSCTPStreamTest |
python | apache__airflow | providers/common/compat/src/airflow/providers/common/compat/lineage/entities.py | {
"start": 1181,
"end": 1443
} | class ____:
"""User entity. Identifies a user."""
email: str = attr.ib()
first_name: str | None = None
last_name: str | None = None
template_fields: ClassVar = ("email", "first_name", "last_name")
@attr.s(auto_attribs=True, kw_only=True)
| User |
python | scipy__scipy | scipy/sparse/tests/test_64bit.py | {
"start": 3364,
"end": 3976
} | class ____:
def _check_resiliency(self, cls, method_name, **kw):
# Resiliency test, to check that sparse matrices deal reasonably
# with varying index data types.
@with_64bit_maxval_limit(**kw)
def check(cls, method_name):
instance = cls()
if hasattr(instance, 'setup_method'):
instance.setup_method()
try:
getattr(instance, method_name)()
finally:
if hasattr(instance, 'teardown_method'):
instance.teardown_method()
check(cls, method_name)
| RunAll64Bit |
python | PyCQA__pylint | doc/data/messages/m/multiple-class-sub-patterns/good.py | {
"start": 0,
"end": 275
} | class ____:
__match_args__ = ("title", "year")
def __init__(self, title, year):
self.title = title
self.year = year
def func(item: Book):
match item:
case Book(title="abc"):
...
case Book(year=2000):
...
| Book |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/generic_utils.py | {
"start": 5497,
"end": 6879
} | class ____(object):
"""A context manager for keeping track of loaded objects.
During the deserialization process, we may come across objects that are
shared across multiple layers. In order to accurately restore the network
structure to its original state, `SharedObjectLoadingScope` allows us to
re-use shared objects rather than cloning them.
"""
def __enter__(self):
if _shared_object_disabled():
return NoopLoadingScope()
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = self
self._obj_ids_to_obj = {}
return self
def get(self, object_id):
"""Given a shared object ID, returns a previously instantiated object.
Args:
object_id: shared object ID to use when attempting to find already-loaded
object.
Returns:
The object, if we've seen this ID before. Else, `None`.
"""
# Explicitly check for `None` internally to make external calling code a
# bit cleaner.
if object_id is None:
return
return self._obj_ids_to_obj.get(object_id)
def set(self, object_id, obj):
"""Stores an instantiated object for future lookup and sharing."""
if object_id is None:
return
self._obj_ids_to_obj[object_id] = obj
def __exit__(self, *args, **kwargs):
global SHARED_OBJECT_LOADING
SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
| SharedObjectLoadingScope |
python | tensorflow__tensorflow | tensorflow/python/keras/mixed_precision/test_util.py | {
"start": 8267,
"end": 8420
} | class ____(regularizers.Regularizer):
def __call__(self, x):
return math_ops.reduce_sum(x)
def get_config(self):
return {}
| ReduceSumRegularizer |
python | django__django | tests/known_related_objects/models.py | {
"start": 495,
"end": 740
} | class ____(models.Model):
name = models.CharField(max_length=30)
pool = models.OneToOneField(Pool, models.CASCADE)
another_pool = models.OneToOneField(
Pool, models.CASCADE, null=True, related_name="another_style"
)
| PoolStyle |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/glib2.py | {
"start": 1085,
"end": 4722
} | class ____(Task.Task):
vars = ['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL']
color = 'BLUE'
ext_out = ['.h']
def run(self):
bld = self.generator.bld
get = self.env.get_flat
cmd1 = "%s %s --prefix=%s --header > %s" % (
get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[0].abspath()
)
ret = bld.exec_command(cmd1)
if ret:
return ret
c = '''#include "%s"\n''' % self.outputs[0].name
self.outputs[1].write(c)
cmd2 = "%s %s --prefix=%s --body >> %s" % (
get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[1].abspath()
)
return bld.exec_command(cmd2)
@taskgen_method
def add_enums_from_template(self, source='', target='', template='', comments=''):
if not hasattr(self, 'enums_list'):
self.enums_list = []
self.meths.append('process_enums')
self.enums_list.append({
'source': source,
'target': target,
'template': template,
'file-head': '',
'file-prod': '',
'file-tail': '',
'enum-prod': '',
'value-head': '',
'value-prod': '',
'value-tail': '',
'comments': comments
})
@taskgen_method
def add_enums(
self,
source='',
target='',
file_head='',
file_prod='',
file_tail='',
enum_prod='',
value_head='',
value_prod='',
value_tail='',
comments=''
):
if not hasattr(self, 'enums_list'):
self.enums_list = []
self.meths.append('process_enums')
self.enums_list.append({
'source': source,
'template': '',
'target': target,
'file-head': file_head,
'file-prod': file_prod,
'file-tail': file_tail,
'enum-prod': enum_prod,
'value-head': value_head,
'value-prod': value_prod,
'value-tail': value_tail,
'comments': comments
})
@before_method('process_source')
def process_enums(self):
for enum in getattr(self, 'enums_list', []):
task = self.create_task('glib_mkenums')
env = task.env
inputs = []
source_list = self.to_list(enum['source'])
if not source_list:
raise Errors.WafError('missing source ' + str(enum))
source_list = [self.path.find_resource(k) for k in source_list]
inputs += source_list
env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list]
if not enum['target']:
raise Errors.WafError('missing target ' + str(enum))
tgt_node = self.path.find_or_declare(enum['target'])
if tgt_node.name.endswith('.c'):
self.source.append(tgt_node)
env.GLIB_MKENUMS_TARGET = tgt_node.abspath()
options = []
if enum['template']:
template_node = self.path.find_resource(enum['template'])
options.append('--template %s' % (template_node.abspath()))
inputs.append(template_node)
params = {
'file-head': '--fhead',
'file-prod': '--fprod',
'file-tail': '--ftail',
'enum-prod': '--eprod',
'value-head': '--vhead',
'value-prod': '--vprod',
'value-tail': '--vtail',
'comments': '--comments'
}
for param, option in params.items():
if enum[param]:
options.append('%s %r' % (option, enum[param]))
env.GLIB_MKENUMS_OPTIONS = ' '.join(options)
task.set_inputs(inputs)
task.set_outputs(tgt_node)
| glib_genmarshal |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 1758,
"end": 1892
} | class ____:
def say_hello(self):
"""inherited"""
@classmethod
def say_goodbye(cls):
"""inherited"""
| Greeter |
python | numba__numba | numba/tests/test_array_exprs.py | {
"start": 617,
"end": 2081
} | class ____(dict):
def __getattr__(s, k):
return s[k] if k in s else super(Namespace, s).__getattr__(k)
def axy(a, x, y):
return a * x + y
def ax2(a, x, y):
return a * x + y
def pos_root(As, Bs, Cs):
return (-Bs + (((Bs ** 2.) - (4. * As * Cs)) ** 0.5)) / (2. * As)
def neg_root_common_subexpr(As, Bs, Cs):
_2As = 2. * As
_4AsCs = 2. * _2As * Cs
_Bs2_4AsCs = (Bs ** 2. - _4AsCs)
return (-Bs - (_Bs2_4AsCs ** 0.5)) / _2As
def neg_root_complex_subexpr(As, Bs, Cs):
_2As = 2. * As
_4AsCs = 2. * _2As * Cs
_Bs2_4AsCs = (Bs ** 2. - _4AsCs) + 0j # Force into the complex domain.
return (-Bs - (_Bs2_4AsCs ** 0.5)) / _2As
vaxy = vectorize(axy)
def call_stuff(a0, a1):
return np.cos(vaxy(a0, np.sin(a1) - 1., 1.))
def are_roots_imaginary(As, Bs, Cs):
return (Bs ** 2 - 4 * As * Cs) < 0
def div_add(As, Bs, Cs):
return As / Bs + Cs
def cube(As):
return As ** 3
def explicit_output(a, b, out):
np.cos(a, out)
return np.add(out, b, out)
def variable_name_reuse(a, b, c, d):
u = a + b
u = u - a * b
u = u * c + d
return u
# From issue #1264
def distance_matrix(vectors):
n_vectors = vectors.shape[0]
result = np.empty((n_vectors, n_vectors), dtype=np.float64)
for i in range(n_vectors):
for j in range(i, n_vectors):
result[i,j] = result[j,i] = np.sum(
(vectors[i] - vectors[j]) ** 2) ** 0.5
return result
| Namespace |
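A quick numeric sanity check of `pos_root` from the row above, using made-up coefficient arrays (illustrative values, not part of the numba test data): for x**2 - 3x + 2 the positive root is 2.

import numpy as np

def pos_root(As, Bs, Cs):
    # same formula as in the snippet above: (-B + sqrt(B^2 - 4AC)) / (2A)
    return (-Bs + (((Bs ** 2.) - (4. * As * Cs)) ** 0.5)) / (2. * As)

print(pos_root(np.array([1.0]), np.array([-3.0]), np.array([2.0])))  # [2.]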
python | openai__openai-python | src/openai/types/beta/realtime/realtime_client_event.py | {
"start": 1061,
"end": 1839
} | class ____(BaseModel):
type: Literal["output_audio_buffer.clear"]
"""The event type, must be `output_audio_buffer.clear`."""
event_id: Optional[str] = None
"""The unique ID of the client event used for error handling."""
RealtimeClientEvent: TypeAlias = Annotated[
Union[
ConversationItemCreateEvent,
ConversationItemDeleteEvent,
ConversationItemRetrieveEvent,
ConversationItemTruncateEvent,
InputAudioBufferAppendEvent,
InputAudioBufferClearEvent,
OutputAudioBufferClear,
InputAudioBufferCommitEvent,
ResponseCancelEvent,
ResponseCreateEvent,
SessionUpdateEvent,
TranscriptionSessionUpdate,
],
PropertyInfo(discriminator="type"),
]
| OutputAudioBufferClear |
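The union at the end of this row dispatches on the literal `type` field. As a rough, generic illustration of the same discriminated-union idea in plain pydantic v2 (not the SDK's own `PropertyInfo` mechanism, and with a second event class invented purely for the sketch):

from typing import Annotated, Literal, Optional, Union
from pydantic import BaseModel, Field, TypeAdapter

class ClearEvent(BaseModel):
    type: Literal["output_audio_buffer.clear"]
    event_id: Optional[str] = None

class CommitEvent(BaseModel):
    type: Literal["input_audio_buffer.commit"]
    event_id: Optional[str] = None

Event = Annotated[Union[ClearEvent, CommitEvent], Field(discriminator="type")]
event = TypeAdapter(Event).validate_python({"type": "output_audio_buffer.clear"})
print(type(event).__name__)  # ClearEvent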
python | yaml__pyyaml | tests/legacy_tests/canonical.py | {
"start": 110,
"end": 6850
} | class ____:
def __init__(self, data):
if isinstance(data, bytes):
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
raise CanonicalError("utf-8 stream is expected")
self.data = data+'\0'
self.index = 0
self.tokens = []
self.scanned = False
def check_token(self, *choices):
if not self.scanned:
self.scan()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
if not self.scanned:
self.scan()
if self.tokens:
return self.tokens[0]
def get_token(self, choice=None):
if not self.scanned:
self.scan()
token = self.tokens.pop(0)
if choice and not isinstance(token, choice):
raise CanonicalError("unexpected token "+repr(token))
return token
def get_token_value(self):
token = self.get_token()
return token.value
def scan(self):
self.tokens.append(yaml.StreamStartToken(None, None))
while True:
self.find_token()
ch = self.data[self.index]
if ch == '\0':
self.tokens.append(yaml.StreamEndToken(None, None))
break
elif ch == '%':
self.tokens.append(self.scan_directive())
elif ch == '-' and self.data[self.index:self.index+3] == '---':
self.index += 3
self.tokens.append(yaml.DocumentStartToken(None, None))
elif ch == '[':
self.index += 1
self.tokens.append(yaml.FlowSequenceStartToken(None, None))
elif ch == '{':
self.index += 1
self.tokens.append(yaml.FlowMappingStartToken(None, None))
elif ch == ']':
self.index += 1
self.tokens.append(yaml.FlowSequenceEndToken(None, None))
elif ch == '}':
self.index += 1
self.tokens.append(yaml.FlowMappingEndToken(None, None))
elif ch == '?':
self.index += 1
self.tokens.append(yaml.KeyToken(None, None))
elif ch == ':':
self.index += 1
self.tokens.append(yaml.ValueToken(None, None))
elif ch == ',':
self.index += 1
self.tokens.append(yaml.FlowEntryToken(None, None))
elif ch == '*' or ch == '&':
self.tokens.append(self.scan_alias())
elif ch == '!':
self.tokens.append(self.scan_tag())
elif ch == '"':
self.tokens.append(self.scan_scalar())
else:
raise CanonicalError("invalid token")
self.scanned = True
DIRECTIVE = '%YAML 1.1'
def scan_directive(self):
if self.data[self.index:self.index+len(self.DIRECTIVE)] == self.DIRECTIVE and \
self.data[self.index+len(self.DIRECTIVE)] in ' \n\0':
self.index += len(self.DIRECTIVE)
return yaml.DirectiveToken('YAML', (1, 1), None, None)
else:
raise CanonicalError("invalid directive")
def scan_alias(self):
if self.data[self.index] == '*':
TokenClass = yaml.AliasToken
else:
TokenClass = yaml.AnchorToken
self.index += 1
start = self.index
while self.data[self.index] not in ', \n\0':
self.index += 1
value = self.data[start:self.index]
return TokenClass(value, None, None)
def scan_tag(self):
self.index += 1
start = self.index
while self.data[self.index] not in ' \n\0':
self.index += 1
value = self.data[start:self.index]
if not value:
value = '!'
elif value[0] == '!':
value = 'tag:yaml.org,2002:'+value[1:]
elif value[0] == '<' and value[-1] == '>':
value = value[1:-1]
else:
value = '!'+value
return yaml.TagToken(value, None, None)
QUOTE_CODES = {
'x': 2,
'u': 4,
'U': 8,
}
QUOTE_REPLACES = {
'\\': '\\',
'\"': '\"',
' ': ' ',
'a': '\x07',
'b': '\x08',
'e': '\x1B',
'f': '\x0C',
'n': '\x0A',
'r': '\x0D',
't': '\x09',
'v': '\x0B',
'N': '\u0085',
'L': '\u2028',
'P': '\u2029',
'_': '_',
'0': '\x00',
}
def scan_scalar(self):
self.index += 1
chunks = []
start = self.index
ignore_spaces = False
while self.data[self.index] != '"':
if self.data[self.index] == '\\':
ignore_spaces = False
chunks.append(self.data[start:self.index])
self.index += 1
ch = self.data[self.index]
self.index += 1
if ch == '\n':
ignore_spaces = True
elif ch in self.QUOTE_CODES:
length = self.QUOTE_CODES[ch]
code = int(self.data[self.index:self.index+length], 16)
chunks.append(chr(code))
self.index += length
else:
if ch not in self.QUOTE_REPLACES:
raise CanonicalError("invalid escape code")
chunks.append(self.QUOTE_REPLACES[ch])
start = self.index
elif self.data[self.index] == '\n':
chunks.append(self.data[start:self.index])
chunks.append(' ')
self.index += 1
start = self.index
ignore_spaces = True
elif ignore_spaces and self.data[self.index] == ' ':
self.index += 1
start = self.index
else:
ignore_spaces = False
self.index += 1
chunks.append(self.data[start:self.index])
self.index += 1
return yaml.ScalarToken(''.join(chunks), False, None, None)
def find_token(self):
found = False
while not found:
while self.data[self.index] in ' \t':
self.index += 1
if self.data[self.index] == '#':
while self.data[self.index] != '\n':
self.index += 1
if self.data[self.index] == '\n':
self.index += 1
else:
found = True
| CanonicalScanner |
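`scan_tag` in the row above expands YAML tag shorthands after consuming the leading `!`. A standalone restatement of just that mapping (a sketch, not using the scanner class itself):

def expand_tag(value):
    # `value` is the text after the first '!', as scan_tag reads it
    if not value:
        return '!'
    if value[0] == '!':
        return 'tag:yaml.org,2002:' + value[1:]
    if value[0] == '<' and value[-1] == '>':
        return value[1:-1]
    return '!' + value

print(expand_tag('!str'))            # tag:yaml.org,2002:str  (input token was '!!str')
print(expand_tag('<tag:x,2002:y>'))  # tag:x,2002:y
print(expand_tag('foo'))             # !foo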
python | pandas-dev__pandas | pandas/_testing/__init__.py | {
"start": 8860,
"end": 9534
} | class ____(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
# For testing, those properties return a generic callable, and not
# the actual class. In this case that is equivalent, but it is to
# ensure we don't rely on the property returning a class
# See https://github.com/pandas-dev/pandas/pull/46018 and
# https://github.com/pandas-dev/pandas/issues/32638 and linked issues
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
@property
def _constructor_expanddim(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
| SubclassedSeries |
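The `_constructor` property in the row above is what makes pandas operations return the subclass again. A minimal check of that behavior, assuming a recent pandas (the `_constructor_expanddim` half is omitted here since `SubclassedDataFrame` is not shown):

import pandas as pd

class SubclassedSeries(pd.Series):
    @property
    def _constructor(self):
        return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)

s = SubclassedSeries([1, 2, 3])
print(type(s[:2]).__name__)  # SubclassedSeries, not plain Series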
python | huggingface__transformers | src/transformers/models/qwen3_vl/modular_qwen3_vl.py | {
"start": 61319,
"end": 71782
} | class ____(Qwen2VLProcessor):
r"""
Constructs a Qwen3VL processor which wraps a Qwen3VL image processor and a Qwen2 tokenizer into a single processor.
[`Qwen3VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Qwen3VLProcessor.__call__`] and [`~Qwen3VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen3VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
super().__init__(image_processor, tokenizer, video_processor, chat_template, **kwargs)
self.vision_start_token = (
"<|vision_start|>" if not hasattr(tokenizer, "vision_start_token") else tokenizer.vision_start_token
)
self.vision_end_token = (
"<|vision_end|>" if not hasattr(tokenizer, "vision_end_token") else tokenizer.vision_end_token
)
self.vision_start_token_id = (
tokenizer.vision_start_token_id
if getattr(tokenizer, "vision_start_token_id", None)
else tokenizer.convert_tokens_to_ids(self.vision_start_token)
)
self.vision_end_token_id = (
tokenizer.vision_end_token_id
if getattr(tokenizer, "vision_end_token_id", None)
else tokenizer.convert_tokens_to_ids(self.vision_end_token)
)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: VideoInput = None,
**kwargs: Unpack[Qwen3VLProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Qwen3VLProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
video_grid_thw = videos_inputs["video_grid_thw"]
# If user has not requested video metadata, pop it
if not kwargs.get("return_metadata"):
video_metadata = videos_inputs.pop("video_metadata")
else:
video_metadata = videos_inputs["video_metadata"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
metadata = video_metadata[index]
if metadata.fps is None:
logger.warning_once(
"Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 if metadata.fps is None else metadata.fps
# if timestamps are not provided, calculate them
curr_timestamp = self._calculate_timestamps(
metadata.frames_indices,
metadata.fps,
self.video_processor.merge_size,
)
video_placeholder = ""
frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
for frame_idx in range(video_grid_thw[index][0]):
curr_time = curr_timestamp[frame_idx]
video_placeholder += f"<{curr_time:.1f} seconds>"
video_placeholder += (
self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
)
if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
text[i] = text[i].replace(
f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
)
else:
# vllm may input video token directly
text[i] = text[i].replace(self.video_token, video_placeholder, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.video_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _calculate_timestamps(self, indices: Union[list[int], np.ndarray], video_fps: float, merge_size: int = 2):
if not isinstance(indices, list):
indices = indices.tolist()
if len(indices) % merge_size != 0:
indices.extend(indices[-1] for _ in range(merge_size - len(indices) % merge_size))
timestamps = [idx / video_fps for idx in indices]
# @JJJYmmm frames are merged by self.merge_size, \
# so we need to average the timestamps between the first/last frame within the temporal patch
timestamps = [
(timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)
]
return timestamps
__all__ = [
"Qwen3VLConfig",
"Qwen3VLTextConfig",
"Qwen3VLVisionModel",
"Qwen3VLForConditionalGeneration",
"Qwen3VLModel",
"Qwen3VLPreTrainedModel",
"Qwen3VLProcessor",
"Qwen3VLTextModel",
]
| Qwen3VLProcessor |
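`_calculate_timestamps` in the row above converts frame indices to seconds and then averages the first and last timestamp of each temporal patch. A small numeric trace of that logic with made-up values:

indices, video_fps, merge_size = [0, 12, 24, 36], 24.0, 2
timestamps = [i / video_fps for i in indices]            # [0.0, 0.5, 1.0, 1.5]
merged = [(timestamps[i] + timestamps[i + merge_size - 1]) / 2
          for i in range(0, len(timestamps), merge_size)]
print(merged)  # [0.25, 1.25] -> one timestamp per merged temporal patch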
python | doocs__leetcode | solution/0400-0499/0413.Arithmetic Slices/Solution.py | {
"start": 0,
"end": 317
} | class ____:
def numberOfArithmeticSlices(self, nums: List[int]) -> int:
ans = cnt = 0
d = 3000
for a, b in pairwise(nums):
if b - a == d:
cnt += 1
else:
d = b - a
cnt = 0
ans += cnt
return ans
| Solution |
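A quick check of the sliding count above: for nums = [1, 2, 3, 4] the arithmetic slices are [1, 2, 3], [2, 3, 4] and [1, 2, 3, 4], so the answer is 3. The sketch below wraps the same logic as a plain function and adds the import the class body relies on:

from itertools import pairwise  # Python 3.10+

def number_of_arithmetic_slices(nums):
    ans = cnt = 0
    d = 3000  # sentinel difference outside the problem's value range
    for a, b in pairwise(nums):
        if b - a == d:
            cnt += 1
        else:
            d = b - a
            cnt = 0
        ans += cnt
    return ans

print(number_of_arithmetic_slices([1, 2, 3, 4]))  # 3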
python | google__pytype | pytype/pytd/serialize_ast.py | {
"start": 1718,
"end": 2244
} | class ____(visitors.Visitor):
"""Visitor to clear out the lookup caches of TypeDeclUnits and Classes.
The lookup caches of TypeDeclUnits and Classes do not need to be serialized.
Ideally, these would be private fields but those are not yet implemented.
(https://github.com/jcrist/msgspec/issues/199)
"""
def LeaveClass(self, node):
node._name2item.clear() # pylint: disable=protected-access
def LeaveTypeDeclUnit(self, node):
node._name2item.clear() # pylint: disable=protected-access
| ClearLookupCache |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 140465,
"end": 141256
} | class ____:
def test_adds_default_registry_url(self):
with temporary_settings(
{PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE: "alltheimages.com/my-org"}
):
image = DockerImage(name="test-image")
assert image.name == "alltheimages.com/my-org/test-image"
def test_override_default_registry_url(self):
with temporary_settings(
{PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE: "alltheimages.com/my-org"}
):
image = DockerImage(name="otherimages.com/my-org/test-image")
assert image.name == "otherimages.com/my-org/test-image"
def test_no_default_registry_url_by_default(self):
image = DockerImage(name="my-org/test-image")
assert image.name == "my-org/test-image"
| TestDockerImage |
python | huggingface__transformers | src/transformers/tokenization_python.py | {
"start": 15034,
"end": 60713
} | class ____(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, **kwargs):
# 1. Init the parent class
self.tokens_trie = Trie()
# 2. init `_added_tokens_decoder` if child class did not
if not hasattr(self, "_added_tokens_decoder"):
self._added_tokens_decoder: dict[int, AddedToken] = {}
# 3. if a `added_tokens_decoder` is passed, we are loading from a saved tokenizer, we overwrite
self._added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {}))
self._added_tokens_encoder: dict[str, int] = {k.content: v for v, k in self._added_tokens_decoder.items()}
# 4. Token type ID configuration for dynamic mask building
# These can be overridden by subclasses to avoid overriding create_token_type_ids_from_sequences
self.token_type_ids_pattern = kwargs.pop("token_type_ids_pattern", "bert_style") # "all_zeros" or "bert_style"
self.token_type_ids_include_special_tokens = kwargs.pop("token_type_ids_include_special_tokens", True)
# 5. Special tokens mask configuration
# Patterns: "none", "cls_sep", "eos", "bos", "bos_eos", "cls_double_sep", "prefix_suffix"
self.special_tokens_pattern = kwargs.pop("special_tokens_pattern", "cls_sep")
# 6. Set backend to "custom" if not already set (for direct PreTrainedTokenizer subclasses)
if "backend" not in kwargs:
kwargs["backend"] = "custom"
# 7. init the parent class
super().__init__(**kwargs)
if self._added_tokens_decoder:
self._update_total_vocab_size()
# 8. If some of the special tokens are not part of the vocab, we add them at the end.
# V5: the order of addition follows self.SPECIAL_TOKENS_ATTRIBUTES, then extra special tokens
# Note: _add_tokens will automatically skip tokens that are already in the base vocab
self._add_tokens(
[token for token in self.all_special_tokens if token not in self._added_tokens_encoder],
special_tokens=True,
)
self._update_total_vocab_size()
@property
def is_fast(self) -> bool:
return False
@property
def added_tokens_encoder(self) -> dict[str, int]:
"""
Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
optimisation in `self._added_tokens_encoder` for the slow tokenizers.
"""
return {k.content: v for v, k in sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])}
@property
def added_tokens_decoder(self) -> dict[int, AddedToken]:
"""
Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.
Returns:
`dict[str, int]`: The added tokens.
"""
return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0]))
@added_tokens_decoder.setter
def added_tokens_decoder(self, value: dict[int, AddedToken | str]) -> dict[int, AddedToken]:
# Always raise an error if string because users should define the behavior
for index, token in value.items():
if not isinstance(token, (str, AddedToken)) or not isinstance(index, int):
raise TypeError(
f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, AddedToken | str}"
)
self._added_tokens_decoder[index] = AddedToken(token) if isinstance(token, str) else token
self._added_tokens_encoder[str(token)] = index
self._update_total_vocab_size()
def get_added_vocab(self) -> dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from
the fast call because for now we always add the tokens even if they are already in the vocabulary. This is
something we should change.
Returns:
`dict[str, int]`: The added tokens.
"""
return self._added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.total_vocab_size
def _update_total_vocab_size(self):
"""
Update the size of the full vocabulary with the added tokens. Counts the `keys` and not the `values` because
otherwise if there is a hole in the vocab, we will add tokens at a wrong index. This operation is slow and
is only updated when adding tokens.
"""
self.total_vocab_size = len(self.get_vocab())
def _add_tokens(self, new_tokens: list[str] | list[AddedToken], special_tokens: bool = False) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary. Special tokens are sometimes already in the
vocab which is why they have to be handled specifically.
Args:
new_tokens (`list[str]`or `list[tokenizers.AddedToken]`):
Token(s) to add in vocabulary. A token is counted as added if it's not already in the vocabulary
(tested by checking if the tokenizer assigns the index of the `unk_token` to them). If a token is part
of the vocabulary then we simply mark this token as an `AddedToken` which allows to control the
stripping and normalization of this token. This is NOT possible in `tokenizers`.
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = BertModel.from_pretrained("google-bert/bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
added_tokens = 0
if new_tokens is None:
return added_tokens
# TODO this is fairly slow to improve!
current_vocab = self.get_vocab().copy()
new_idx = len(current_vocab) # only call this once, len gives the last index + 1
for token in new_tokens:
if not isinstance(token, (str, AddedToken)):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if str(token) == "":
continue
if isinstance(token, str):
if token in self._added_tokens_encoder:
continue
else:
# very important for fast and slow equivalence!
is_special = token in self.all_special_tokens or special_tokens
token = AddedToken(
token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special
)
elif special_tokens:
# doing token.special=True changes the normalization! will fix in rust
# this is important and the only reason why the AddedTokens in each class are normalized by default
token.__setstate__({"special": True, "normalized": token.normalized})
if token in self._added_tokens_decoder:
continue
if not token.special and token.normalized and getattr(self, "do_lower_case", False):
# Normalize if requested
token.content = token.content.lower()
if token.content not in current_vocab:
token_index = new_idx + added_tokens
current_vocab[token.content] = token_index
added_tokens += 1
else:
token_index = current_vocab[token.content]
if token.special and str(token) not in self.all_special_tokens:
self._extra_special_tokens.append(token)
# the setter automatically updates the reverse map
self._added_tokens_decoder[token_index] = token
self._added_tokens_encoder[token.content] = token_index
if self.verbose:
logger.info(f"Adding {token} to the vocabulary")
self._update_trie()
self._update_total_vocab_size()
return added_tokens
def _update_trie(self, unique_no_split_tokens: list[str] | None = None):
for token in self._added_tokens_decoder.values():
if token.content not in self.tokens_trie._tokens:
self.tokens_trie.add(token.content)
for token in unique_no_split_tokens or []:
if token not in self.tokens_trie._tokens:
self.tokens_trie.add(token)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def tokenize(self, text: TextInput, **kwargs) -> list[str]:
"""
Converts a string into a sequence of tokens, using the tokenizer.
Args:
text: The sequence to be encoded.
**kwargs: Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
Returns:
The list of tokens.
"""
split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens)
text, kwargs = self.prepare_for_tokenization(text, **kwargs)
if split_special_tokens:
# Don't split on any tokens - just tokenize directly
return self._tokenize(text)
# Split on added tokens
tokens = self.tokens_trie.split(text)
no_split_token = self._added_tokens_encoder.keys()
# Handle added token properties (lstrip, rstrip, single_word)
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token])
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
tokens[i + 1] = right.lstrip()
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip()
if tok_extended.single_word:
if left and left[-1] != " ":
tokens[i - 1] += token
tokens[i] = ""
elif right and right[0] != " ":
tokens[i + 1] = token + tokens[i + 1]
tokens[i] = ""
# Tokenize non-added tokens
result = []
all_special_tokens_set = set(self.all_special_tokens)
for token in tokens:
if not token:
continue
if token in no_split_token or token in all_special_tokens_set:
result.append(token)
else:
result.extend(self._tokenize(token))
return result
def _tokenize(self, text, **kwargs):
"""
Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""
raise NotImplementedError
def _convert_token_to_id_with_added_voc(self, token):
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def _encode_plus(
self,
text: TextInput | PreTokenizedInput | EncodedInput,
text_pair: TextInput | PreTokenizedInput | EncodedInput | None = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: int | None = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: int | None = None,
padding_side: str | None = None,
return_tensors: str | TensorType | None = None,
return_token_type_ids: bool | None = None,
return_attention_mask: bool | None = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# Detect batched inputs (list of sequences)
is_batched = isinstance(text, (list, tuple)) and (
(not text and not is_split_into_words)
or (text and is_split_into_words and isinstance(text[0], (list, tuple)))
or (text and not is_split_into_words and isinstance(text[0], (str, list, tuple)))
)
if is_batched:
if text_pair is not None:
if not isinstance(text_pair, (list, tuple)) or len(text_pair) != len(text):
raise ValueError("If `text` is a batch, `text_pair` must also be a batch of the same length.")
pairs = text_pair if text_pair is not None else [None] * len(text)
batch_outputs = {}
for current_text, current_pair in zip(text, pairs):
# Handle tuples/lists as sequence pairs like ("text1", "text2")
# For is_split_into_words=True: only unpack if it's a tuple of exactly 2 sequences (pair)
# Otherwise, treat the list as a single pretokenized sequence
if (
isinstance(current_text, (list, tuple))
and current_text
and not isinstance(current_text[0], int)
and current_pair is None
):
# Check if this looks like a pair: tuple/list of length 2 where elements are strings or lists/tuples
is_pair = (
len(current_text) == 2
and (isinstance(current_text[0], str) or isinstance(current_text[0], (list, tuple)))
and (isinstance(current_text[1], str) or isinstance(current_text[1], (list, tuple)))
)
if is_pair:
current_text, current_pair = current_text
elif len(current_text) == 1:
current_text = current_text[0]
elif not is_split_into_words:
# Only raise error for non-pretokenized input
raise ValueError(f"Expected a pair of sequences, got {len(current_text)} sequences.")
current_output = self._encode_plus(
text=current_text,
text_pair=current_pair,
add_special_tokens=add_special_tokens,
padding_strategy=PaddingStrategy.DO_NOT_PAD, # we pad in batch afterward
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=None, # we pad in batch afterward
padding_side=None, # we pad in batch afterward
return_tensors=None, # We convert the whole batch to tensors at the end
return_token_type_ids=return_token_type_ids,
return_attention_mask=False, # we pad in batch afterward
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
**kwargs,
)
for key, value in current_output.items():
batch_outputs.setdefault(key, []).append(value)
# Remove overflow-related keys before tensor conversion if return_tensors is set
# Slow tokenizers don't support returning these as tensors
if return_tensors and return_overflowing_tokens:
batch_outputs.pop("overflowing_tokens", None)
batch_outputs.pop("num_truncated_tokens", None)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
# Single sequence handling
def get_input_ids(text):
if isinstance(text, str):
# Normal case: tokenize string
return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
if isinstance(text, (list, tuple)) and text:
if isinstance(text[0], int):
return text
# Pre-tokenized strings
if isinstance(text[0], str):
if is_split_into_words:
return self.convert_tokens_to_ids(
[tok for word in text for tok in self.tokenize(word, **kwargs)]
)
return self.convert_tokens_to_ids(text)
raise ValueError(f"Input must be a string, list of strings, or list of ints, got: {type(text)}")
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
def prepare_for_tokenization(
self, text: str, is_split_into_words: bool = False, **kwargs
) -> tuple[str, dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (`str`):
The text to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs (`dict[str, Any]`, *optional*):
Keyword arguments to use for the tokenization.
Returns:
`tuple[str, dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: list[int] | None = None
) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences by adding special tokens.
This method dynamically builds inputs based on the tokenizer's `special_tokens_pattern`:
- `"none"`: No special tokens
- `"cls_sep"`: [CLS] seq0 [SEP] or [CLS] seq0 [SEP] seq1 [SEP]
- `"eos"`: seq0 [EOS] or seq0 [EOS] seq1 [EOS]
- `"bos"`: [BOS] seq0 or [BOS] seq0 [BOS] seq1
- `"bos_eos"`: [BOS] seq0 [EOS] or [BOS] seq0 [EOS] seq1 [EOS]
- `"cls_double_sep"`: [CLS] seq0 [SEP] or [CLS] seq0 [SEP] [SEP] seq1 [SEP]
- `"prefix_suffix"`: `<prefix_tokens> seq0 [seq1] <suffix_tokens>` (custom prefix/suffix stored on the tokenizer)
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of input IDs with the appropriate special tokens.
"""
if self.special_tokens_pattern == "cls_sep":
# [CLS] seq0 [SEP] or [CLS] seq0 [SEP] seq1 [SEP]
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
elif self.special_tokens_pattern == "eos":
# seq0 [EOS] or seq0 [EOS] seq1 [EOS]
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
return token_ids_0 + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
elif self.special_tokens_pattern == "bos":
# [BOS] seq0 or [BOS] seq0 [BOS] seq1
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0
return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
elif self.special_tokens_pattern == "bos_eos":
# [BOS] seq0 [EOS] or [BOS] seq0 [EOS] seq1 [EOS]
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
return [self.bos_token_id] + token_ids_0 + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
elif self.special_tokens_pattern == "cls_double_sep":
# [CLS] seq0 [SEP] or [CLS] seq0 [SEP] [SEP] seq1 [SEP]
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
return (
[self.cls_token_id]
+ token_ids_0
+ [self.sep_token_id, self.sep_token_id]
+ token_ids_1
+ [self.sep_token_id]
)
elif self.special_tokens_pattern == "prefix_suffix":
prefix_tokens = getattr(self, "prefix_tokens", [])
suffix_tokens = getattr(self, "suffix_tokens", [])
if token_ids_1 is None:
return prefix_tokens + token_ids_0 + suffix_tokens
return prefix_tokens + token_ids_0 + token_ids_1 + suffix_tokens
else: # "none" or any other value
# No special tokens
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def get_special_tokens_mask(
self, token_ids_0: list, token_ids_1: list | None = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
This method dynamically builds the special tokens mask based on the tokenizer's `special_tokens_pattern`:
- `"none"`: No special tokens (default, returns all 0s)
- `"cls_sep"`: [CLS] seq0 [SEP] or [CLS] seq0 [SEP] seq1 [SEP]
- `"eos"`: seq0 [EOS] or seq0 [EOS] seq1 [EOS]
- `"bos"`: [BOS] seq0 or [BOS] seq0 [BOS] seq1
- `"bos_eos"`: [BOS] seq0 [EOS] or [BOS] seq0 [EOS] seq1 [EOS]
- `"cls_double_sep"`: [CLS] seq0 [SEP] or [CLS] seq0 [SEP] [SEP] seq1 [SEP]
- `"prefix_suffix"`: `<prefix_tokens> seq0 [seq1] <suffix_tokens>`
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if self.special_tokens_pattern == "cls_sep":
# [CLS] seq0 [SEP] or [CLS] seq0 [SEP] seq1 [SEP]
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
elif self.special_tokens_pattern == "eos":
# seq0 [EOS] or seq0 [EOS] seq1 [EOS]
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
elif self.special_tokens_pattern == "bos":
# [BOS] seq0 or [BOS] seq0 [BOS] seq1
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0))
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
elif self.special_tokens_pattern == "bos_eos":
# [BOS] seq0 [EOS] or [BOS] seq0 [EOS] seq1 [EOS]
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
elif self.special_tokens_pattern == "cls_double_sep":
# [CLS] seq0 [SEP] or [CLS] seq0 [SEP] [SEP] seq1 [SEP]
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
elif self.special_tokens_pattern == "prefix_suffix":
prefix_len = len(getattr(self, "prefix_tokens", []))
suffix_len = len(getattr(self, "suffix_tokens", []))
mask = [1] * prefix_len + ([0] * len(token_ids_0))
if token_ids_1 is not None:
mask += [0] * len(token_ids_1)
mask += [1] * suffix_len
return mask
else:
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ...
@overload
def convert_ids_to_tokens(self, ids: list[int], skip_special_tokens: bool = False) -> list[str]: ...
def convert_ids_to_tokens(self, ids: int | list[int], skip_special_tokens: bool = False) -> str | list[str]:
"""
Converts a single index or a sequence of indices to a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `list[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `list[str]`: The decoded token(s).
"""
if isinstance(ids, int):
return (
self._added_tokens_decoder[ids].content
if ids in self._added_tokens_decoder
else self._convert_id_to_token(ids)
)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
tokens.append(
self._added_tokens_decoder[index].content
if index in self._added_tokens_decoder
else self._convert_id_to_token(index)
)
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: list[str]) -> str:
return " ".join(tokens)
def _decode(
self,
token_ids: int | list[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool | None = None,
**kwargs,
) -> str:
"""Decode token ids to string."""
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
if isinstance(filtered_tokens, str):
filtered_tokens = [filtered_tokens]
text = self.convert_tokens_to_string(filtered_tokens)
# Apply tokenizer-specific cleanup if available and requested
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
# Call custom cleanup method if it exists (e.g., for CLVP's [SPACE] token replacement)
if hasattr(self, "clean_up_tokenization") and callable(self.clean_up_tokenization):
text = self.clean_up_tokenization(text)
else:
# Otherwise apply standard cleanup
text = (
text.replace(" .", ".")
.replace(" ?", "?")
.replace(" !", "!")
.replace(" ,", ",")
.replace(" ' ", "'")
.replace(" n't", "n't")
.replace(" 'm", "'m")
.replace(" 's", "'s")
.replace(" 've", "'ve")
.replace(" 're", "'re")
)
return text
def prepare_for_model(
self,
ids: list[int],
pair_ids: list[int] | None = None,
add_special_tokens: bool = True,
padding: bool | str | PaddingStrategy = False,
truncation: bool | str | TruncationStrategy = False,
max_length: int | None = None,
stride: int = 0,
pad_to_multiple_of: int | None = None,
padding_side: str | None = None,
return_tensors: str | TensorType | None = None,
return_token_type_ids: bool | None = None,
return_attention_mask: bool | None = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
prepend_batch_axis: bool = False,
**kwargs,
) -> BatchEncoding:
"""
Prepares a sequence of input ids so it can be used by the model. Adds special tokens, truncates, and pads.
Args:
ids: Tokenized input ids of the first sequence.
pair_ids: Tokenized input ids of the second sequence (optional).
"""
# Get padding/truncation strategies
padding_strategy, truncation_strategy, max_length, _ = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
# Validation
if (
return_overflowing_tokens
and truncation_strategy == TruncationStrategy.LONGEST_FIRST
and pair_ids is not None
):
raise ValueError(
"Not possible to return overflowing tokens for pair of sequences with the "
"`longest_first`. Please select another truncation strategy than `longest_first`, "
"for instance `only_second` or `only_first`."
)
# Defaults
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
# Truncation
pair = pair_ids is not None
num_special = self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0
total_len = len(ids) + len(pair_ids or []) + num_special
overflowing_tokens = []
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_length,
truncation_strategy=truncation_strategy,
stride=stride,
)
# Add special tokens
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ids + (pair_ids if pair_ids else [])
token_type_ids = [0] * len(sequence)
# Build output
encoded_inputs = {"input_ids": sequence}
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = (
self.get_special_tokens_mask(ids, pair_ids) if add_special_tokens else [0] * len(sequence)
)
if return_overflowing_tokens and not return_tensors and overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_length if max_length else 0
# Check sequence length and warn if needed
self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
# Pad
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
encoded_inputs = self.pad(
encoded_inputs,
max_length=max_length,
padding=padding_strategy.value,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
if return_length:
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
return BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
def truncate_sequences(
self,
ids: list[int],
pair_ids: list[int] | None = None,
num_tokens_to_remove: int = 0,
truncation_strategy: str | TruncationStrategy = "longest_first",
stride: int = 0,
) -> tuple[list[int], list[int], list[int]]:
"""Truncates sequences according to the specified strategy."""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if not isinstance(truncation_strategy, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation_strategy)
overflowing_tokens = []
# ONLY_FIRST or LONGEST_FIRST with single sequence
if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
):
window_len = min(len(ids), stride + num_tokens_to_remove)
if self.truncation_side == "left":
overflowing_tokens = ids[:window_len]
ids = ids[num_tokens_to_remove:]
else:
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
# LONGEST_FIRST with pair
elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
logger.warning(
"Be aware, overflowing tokens are not returned for the setting you have chosen,"
f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
"truncation strategy. So the returned list will always be empty even if some "
"tokens have been removed."
)
len_ids, len_pair = len(ids), len(pair_ids) if pair_ids else 0
first_remove = min(abs(len_pair - len_ids), num_tokens_to_remove)
second_remove = num_tokens_to_remove - first_remove
if len_ids > len_pair:
ids_to_move = first_remove + second_remove // 2
pair_ids_to_move = second_remove - second_remove // 2
else:
ids_to_move = second_remove // 2
pair_ids_to_move = first_remove + second_remove - (second_remove // 2)
if self.truncation_side == "right":
ids = ids[:-ids_to_move] if ids_to_move > 0 else ids
pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids and pair_ids_to_move > 0 else pair_ids
else:
ids = ids[ids_to_move:]
pair_ids = pair_ids[pair_ids_to_move:] if pair_ids else None
# ONLY_SECOND
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids:
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
if self.truncation_side == "right":
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
else:
overflowing_tokens = pair_ids[:window_len]
pair_ids = pair_ids[num_tokens_to_remove:]
return ids, pair_ids, overflowing_tokens
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: list[int] | None = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
This method dynamically builds the token type IDs based on the tokenizer's configuration attributes:
- `token_type_ids_pattern`: Pattern to use ("all_zeros" or "bert_style")
- `token_type_ids_include_special_tokens`: Whether to account for special tokens in length calculation
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: Token type IDs according to the configured pattern.
Examples:
```python
# All zeros pattern (default, used by RoBERTa, BART, etc.)
tokenizer.token_type_ids_pattern = "all_zeros"
# Returns: [0, 0, 0, ...] for both sequences
# BERT-style pattern (first sequence gets 0s, second gets 1s)
tokenizer.token_type_ids_pattern = "bert_style"
# Returns: [0, 0, 0, ..., 1, 1, 1, ...] for sequence pairs
```
"""
# Calculate lengths - account for special tokens if configured
if self.token_type_ids_include_special_tokens:
# Build the full sequence to get accurate length
if token_ids_1 is None:
sequence = self.build_inputs_with_special_tokens(token_ids_0)
seq0_len = len(sequence)
seq1_len = 0
else:
full_sequence = self.build_inputs_with_special_tokens(token_ids_0, token_ids_1)
# Approximate split - this works for most tokenizers
# For more complex cases, subclasses should still override
seq0_with_special = self.build_inputs_with_special_tokens(token_ids_0)
seq0_len = len(seq0_with_special)
seq1_len = len(full_sequence) - seq0_len
else:
# Use raw token lengths
seq0_len = len(token_ids_0)
seq1_len = len(token_ids_1) if token_ids_1 is not None else 0
# Build token type IDs based on pattern
if self.special_tokens_pattern == "prefix_suffix":
total_len = len(getattr(self, "prefix_tokens", [])) + len(token_ids_0)
if token_ids_1 is not None:
total_len += len(token_ids_1)
total_len += len(getattr(self, "suffix_tokens", []))
return [0] * total_len
if self.token_type_ids_pattern == "bert_style" and token_ids_1 is not None:
# BERT-style: first sequence gets 0s, second sequence gets 1s
return [0] * seq0_len + [1] * seq1_len
else:
# All zeros pattern (default): everything gets 0s
return [0] * (seq0_len + seq1_len)
def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str, ...]:
"""
Default implementation for common vocabulary saving patterns.
Saves self.encoder/self.vocab as JSON, optionally with self.bpe_ranks as merges.
Returns empty tuple if no vocabulary exists.
Override this method if your tokenizer needs custom saving logic (e.g., SentencePiece models,
multiple vocabulary files, or special file formats).
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
filename_prefix (`str`, *optional*):
An optional prefix to add to the names of the saved files.
Returns:
`tuple[str, ...]`: Paths to the files saved, or empty tuple if no files saved.
"""
import json
import os
vocab_attr = getattr(self, "encoder", None) or getattr(self, "vocab", None)
if vocab_attr is None:
return ()
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return ()
vocab_files_names = getattr(self, "vocab_files_names", {})
prefix = f"{filename_prefix}-" if filename_prefix else ""
# Save vocabulary
vocab_file = os.path.join(save_directory, prefix + vocab_files_names.get("vocab_file", "vocab.json"))
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(vocab_attr, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
# Save BPE merges if present
bpe_ranks = getattr(self, "bpe_ranks", None)
if bpe_ranks is None:
return (vocab_file,)
merge_file = os.path.join(save_directory, prefix + vocab_files_names.get("merges_file", "merges.txt"))
with open(merge_file, "w", encoding="utf-8") as writer:
if getattr(self, "add_bpe_version_header", False):
writer.write("#version: 0.2\n")
index = 0
for bpe_tokens, token_index in sorted(bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return (vocab_file, merge_file)
# Backward compatibility alias
PreTrainedTokenizer = PythonBackend
| PythonBackend |
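The `special_tokens_pattern` docstrings in the row above describe the id layouts concretely. A tiny illustration of the "cls_sep" case with made-up ids (101 and 102 are hypothetical [CLS]/[SEP] ids, not taken from any real tokenizer):

cls_id, sep_id = 101, 102          # hypothetical special-token ids
seq0, seq1 = [7, 8, 9], [10, 11]

single = [cls_id] + seq0 + [sep_id]
pair = [cls_id] + seq0 + [sep_id] + seq1 + [sep_id]
print(single)  # [101, 7, 8, 9, 102]
print(pair)    # [101, 7, 8, 9, 102, 10, 11, 102]

# matching special_tokens_mask for the pair, as get_special_tokens_mask builds it:
print([1] + [0] * len(seq0) + [1] + [0] * len(seq1) + [1])  # [1, 0, 0, 0, 1, 0, 0, 1]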
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 525361,
"end": 532514
} | class ____:
def __abs__(self):
if self.is_empty:
return 0.0
return abs(self.ul - self.ur) * abs(self.ul - self.ll)
def __add__(self, q):
if hasattr(q, "__float__"):
return Quad(self.ul + q, self.ur + q, self.ll + q, self.lr + q)
if len(q) != 4:
raise ValueError("Quad: bad seq len")
return Quad(self.ul + q[0], self.ur + q[1], self.ll + q[2], self.lr + q[3])
def __bool__(self):
return not self.is_empty
def __contains__(self, x):
try:
l = x.__len__()
except Exception:
if g_exceptions_verbose > 1: exception_info()
return False
if l == 2:
return util_point_in_quad(x, self)
if l != 4:
return False
if CheckRect(x):
if Rect(x).is_empty:
return True
return util_point_in_quad(x[:2], self) and util_point_in_quad(x[2:], self)
if CheckQuad(x):
for i in range(4):
if not util_point_in_quad(x[i], self):
return False
return True
return False
def __eq__(self, quad):
if not hasattr(quad, "__len__"):
return False
return len(quad) == 4 and (
self.ul == quad[0] and
self.ur == quad[1] and
self.ll == quad[2] and
self.lr == quad[3]
)
def __getitem__(self, i):
return (self.ul, self.ur, self.ll, self.lr)[i]
def __hash__(self):
return hash(tuple(self))
def __init__(self, *args, ul=None, ur=None, ll=None, lr=None):
'''
Quad() - all zero points
Quad(ul, ur, ll, lr)
Quad(quad) - new copy
Quad(sequence) - from 'sequence'
Explicit keyword args ul, ur, ll, lr override earlier settings if not
None.
'''
if not args:
self.ul = self.ur = self.ll = self.lr = Point()
elif len(args) > 4:
raise ValueError("Quad: bad seq len")
elif len(args) == 4:
self.ul, self.ur, self.ll, self.lr = map(Point, args)
elif len(args) == 1:
l = args[0]
if isinstance(l, mupdf.FzQuad):
self.this = l
self.ul, self.ur, self.ll, self.lr = Point(l.ul), Point(l.ur), Point(l.ll), Point(l.lr)
elif not hasattr(l, "__getitem__"):
raise ValueError("Quad: bad args")
elif len(l) != 4:
raise ValueError("Quad: bad seq len")
else:
self.ul, self.ur, self.ll, self.lr = map(Point, l)
else:
raise ValueError("Quad: bad args")
if ul is not None: self.ul = Point(ul)
if ur is not None: self.ur = Point(ur)
if ll is not None: self.ll = Point(ll)
if lr is not None: self.lr = Point(lr)
def __len__(self):
return 4
def __mul__(self, m):
q = Quad(self)
q = q.transform(m)
return q
def __neg__(self):
return Quad(-self.ul, -self.ur, -self.ll, -self.lr)
def __nonzero__(self):
return not self.is_empty
def __pos__(self):
return Quad(self)
def __repr__(self):
return "Quad" + str(tuple(self))
def __setitem__(self, i, v):
if i == 0: self.ul = Point(v)
elif i == 1: self.ur = Point(v)
elif i == 2: self.ll = Point(v)
elif i == 3: self.lr = Point(v)
else:
raise IndexError("index out of range")
return None
def __sub__(self, q):
if hasattr(q, "__float__"):
return Quad(self.ul - q, self.ur - q, self.ll - q, self.lr - q)
if len(q) != 4:
raise ValueError("Quad: bad seq len")
return Quad(self.ul - q[0], self.ur - q[1], self.ll - q[2], self.lr - q[3])
def __truediv__(self, m):
if hasattr(m, "__float__"):
im = 1. / m
else:
im = util_invert_matrix(m)[1]
if not im:
raise ZeroDivisionError("Matrix not invertible")
q = Quad(self)
q = q.transform(im)
return q
@property
def is_convex(self):
"""Check if quad is convex and not degenerate.
Notes:
For each of the two diagonals, check that the remaining two corners
do not lie on the same side of that diagonal.
Returns:
True or False.
"""
m = planish_line(self.ul, self.lr) # puts this diagonal on x-axis
p1 = self.ll * m # transform the
p2 = self.ur * m # other two points
if p1.y * p2.y > 0:
return False
m = planish_line(self.ll, self.ur) # puts other diagonal on x-axis
p1 = self.lr * m # transform the
p2 = self.ul * m # remaining points
if p1.y * p2.y > 0:
return False
return True
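# Editor's illustrative example (assumed coordinates, not from the original source):
# with corners given as ul, ur, ll, lr, Quad((0,0), (1,0), (0,1), (1,1)) is the unit
# square and passes both sign tests above; swapping ll and lr gives a self-intersecting
# "bow-tie" whose remaining corners fall on the same side of the ul-lr diagonal, so
# p1.y * p2.y > 0 and the property returns False.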
@property
def is_empty(self):
"""Check whether all quad corners are on the same line.
This is the case if width or height is zero.
"""
return self.width < EPSILON or self.height < EPSILON
@property
def is_infinite(self):
"""Check whether this is the infinite quad."""
return self.rect.is_infinite
@property
def is_rectangular(self):
"""Check if quad is rectangular.
Notes:
Some rotation matrix can then transform it into a rectangle.
This is equivalent to three corners enclosing 90 degrees.
Returns:
True or False.
"""
sine = util_sine_between(self.ul, self.ur, self.lr)
if abs(sine - 1) > EPSILON: # the sine of the angle
return False
sine = util_sine_between(self.ur, self.lr, self.ll)
if abs(sine - 1) > EPSILON:
return False
sine = util_sine_between(self.lr, self.ll, self.ul)
if abs(sine - 1) > EPSILON:
return False
return True
def morph(self, p, m):
"""Morph the quad with matrix-like 'm' and point-like 'p'.
Return a new quad."""
if self.is_infinite:
return INFINITE_QUAD()
delta = Matrix(1, 1).pretranslate(p.x, p.y)
q = self * ~delta * m * delta
return q
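# Editor's note on the formula above: delta translates the origin to p, so
# self * ~delta * m * delta shifts the quad so that p sits at the origin, applies m,
# and shifts back, i.e. m acts relative to p (p stays fixed whenever m fixes the
# origin, e.g. for rotations and scalings).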
@property
def rect(self):
r = Rect()
r.x0 = min(self.ul.x, self.ur.x, self.lr.x, self.ll.x)
r.y0 = min(self.ul.y, self.ur.y, self.lr.y, self.ll.y)
r.x1 = max(self.ul.x, self.ur.x, self.lr.x, self.ll.x)
r.y1 = max(self.ul.y, self.ur.y, self.lr.y, self.ll.y)
return r
def transform(self, m):
"""Replace quad by its transformation with matrix m."""
if hasattr(m, "__float__"):
pass
elif len(m) != 6:
raise ValueError("Matrix: bad seq len")
self.ul *= m
self.ur *= m
self.ll *= m
self.lr *= m
return self
__div__ = __truediv__
width = property(lambda self: max(abs(self.ul - self.ur), abs(self.ll - self.lr)))
height = property(lambda self: max(abs(self.ul - self.ll), abs(self.ur - self.lr)))
| Quad |
python | openai__openai-python | src/openai/types/realtime/realtime_response_create_mcp_tool.py | {
"start": 490,
"end": 998
} | class ____(BaseModel):
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
AllowedTools: TypeAlias = Union[List[str], AllowedToolsMcpToolFilter, None]
| AllowedToolsMcpToolFilter |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 13286,
"end": 14489
} | class ____(CompositeTicker):
''' Generate nice ticks across different date and time scales.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
num_minor_ticks = Override(default=0)
# TODO: (bev) InstanceDefault for this, someday
tickers = Override(default=lambda: [
AdaptiveTicker(
mantissas=[1, 2, 5],
base=10,
min_interval=0,
max_interval=500*ONE_MILLI,
num_minor_ticks=0,
),
AdaptiveTicker(
mantissas=[1, 2, 5, 10, 15, 20, 30],
base=60,
min_interval=ONE_SECOND,
max_interval=30*ONE_MINUTE,
num_minor_ticks=0,
),
AdaptiveTicker(
mantissas=[1, 2, 4, 6, 8, 12],
base=24,
min_interval=ONE_HOUR,
max_interval=12*ONE_HOUR,
num_minor_ticks=0,
),
AdaptiveTicker(
mantissas=[1, 2, 5],
base=10,
min_interval=ONE_DAY,
max_interval=None,
num_minor_ticks=0,
),
])
| TimedeltaTicker |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_between_inclusive_threshold_range.py | {
"start": 919,
"end": 5731
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = "data_profiler.profile_numeric_columns_diff_between_inclusive_threshold_range"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - too complex
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_diff = metrics.get("data_profiler.profile_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_diff["data_stats"]
requested_columns = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
for stat, bounds in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
between_bounds = is_value_between_bounds(
diff_val, bounds["lower"], bounds["upper"], inclusive=True
)
if not between_bounds:
requested_columns[col][stat] = {
"lower_bound": bounds["lower"],
"upper_bound": bounds["upper"],
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
return requested_columns
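# Editor's sketch of the returned structure (hypothetical column and stat names):
# {"col_a": {"mean": True,
#            "std": {"lower_bound": -1, "upper_bound": 1, "value_found": 2.3}},
#  "col_b": "Column is Non-Numeric"}
# i.e. True for stats inside the inclusive bounds, otherwise a dict with the bounds
# and the offending value.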
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_diff_between_inclusive_threshold_range"
):
dependencies["data_profiler.profile_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsDiffBetweenInclusiveThresholdRange |
python | pyparsing__pyparsing | examples/simpleBool.py | {
"start": 737,
"end": 983
} | class ____:
def __init__(self, t):
self.label = t[0]
self.value = eval(t[0])
def __bool__(self) -> bool:
return self.value
def __str__(self) -> str:
return self.label
__repr__ = __str__
| BoolOperand |
python | ray-project__ray | python/ray/serve/metrics.py | {
"start": 7063,
"end": 9272
} | class ____(metrics.Histogram):
"""Tracks the size and number of events in buckets.
Histograms allow you to calculate aggregate quantiles
such as 25, 50, 95, 99 percentile latency for an RPC.
This corresponds to Prometheus' histogram metric:
https://prometheus.io/docs/concepts/metric_types/#histogram
Serve-related tags ("deployment", "replica", "application", "route")
are added automatically if not provided.
.. code-block:: python
@serve.deployment
class MyDeployment:
def __init__(self):
self.my_histogram = Histogram(
"my_histogram",
description=("Histogram of the __call__ method running time."),
boundaries=[1,2,4,8,16,32,64],
tag_keys=("model",),
)
self.my_histogram.set_default_tags({"model": "123"})
def __call__(self):
start = time.time()
self.my_histogram.observe(time.time() - start)
Args:
name: Name of the metric.
description: Description of the metric.
boundaries: Boundaries of histogram buckets.
tag_keys: Tag keys of the metric.
"""
def __init__(
self,
name: str,
description: str = "",
boundaries: List[float] = None,
tag_keys: Optional[Tuple[str]] = None,
):
if tag_keys and not isinstance(tag_keys, tuple):
raise TypeError(
"tag_keys should be a tuple type, got: " f"{type(tag_keys)}"
)
tag_keys = _add_serve_metric_tags(tag_keys)
super().__init__(name, description, boundaries, tag_keys)
self.set_default_tags({})
def set_default_tags(self, default_tags: Dict[str, str]):
super().set_default_tags(_add_serve_metric_default_tags(default_tags))
def observe(self, value: Union[int, float], tags: Dict[str, str] = None):
"""Observe the given value, add serve context
tag values to the tags
"""
_add_serve_context_tag_values(self._tag_keys, tags)
super().observe(value, tags)
| Histogram |
python | numba__numba | numba/cuda/tests/cudapy/test_dispatcher.py | {
"start": 388,
"end": 3652
} | class ____(CUDATestCase):
def _test_no_double_specialize(self, dispatcher, ty):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize(ty)
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig_same_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_same_types(self):
# Attempting to specialize an already-specialized kernel is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(float32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_no_double_specialize_sig_diff_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(int32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_diff_types(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(int32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize(int32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
def test_specialize_cache_same_with_ordering(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types, taking into account array ordering and multiple
# arguments.
@cuda.jit
def f(x, y):
pass
self.assertEqual(len(f.specializations), 0)
# 'A' order specialization
f_f32a_f32a = f.specialize(float32[:], float32[:])
self.assertEqual(len(f.specializations), 1)
# 'C' order specialization
f_f32c_f32c = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_f32a_f32a, f_f32c_f32c)
# Reuse 'C' order specialization
f_f32c_f32c_2 = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIs(f_f32c_f32c, f_f32c_f32c_2)
| TestDispatcherSpecialization |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/assignment3.py | {
"start": 1229,
"end": 1348
} | class ____:
@classmethod
def method1(cls):
cls.v1: list[Literal[0]] = [] if issubclass(cls, int) else [0]
| A |
python | doocs__leetcode | solution/2400-2499/2408.Design SQL/Solution.py | {
"start": 0,
"end": 603
} | class ____:
def __init__(self, names: List[str], columns: List[int]):
self.tables = defaultdict(list)
def insertRow(self, name: str, row: List[str]) -> None:
self.tables[name].append(row)
def deleteRow(self, name: str, rowId: int) -> None:
pass
def selectCell(self, name: str, rowId: int, columnId: int) -> str:
return self.tables[name][rowId - 1][columnId - 1]
# Your SQL object will be instantiated and called as such:
# obj = SQL(names, columns)
# obj.insertRow(name,row)
# obj.deleteRow(name,rowId)
# param_3 = obj.selectCell(name,rowId,columnId)
| SQL |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 48890,
"end": 49012
} | class ____(TypedDict):
limit: int
TransactionNameStrategy = Literal["strict", "clientBased"]
| CustomMeasurementSettings |
python | allegroai__clearml | clearml/backend_api/api_proxy.py | {
"start": 2041,
"end": 3192
} | class ____(ApiServiceProxy):
_extra_services_modules = []
def _import_module(self, name: str, _: Optional[str]) -> Any:
for module_path in self._get_services_modules():
try:
return importlib.import_module(name, package=module_path)
except ImportError:
pass
raise ImportError("No module '{}' in all predefined services module paths".format(name))
@classmethod
def add_services_module(cls, module_path: str) -> None:
"""
Add an additional service module path to look in when importing types
"""
cls._extra_services_modules.append(module_path)
def _get_services_modules(self) -> Generator[str, None, None]:
"""
Yield all services module paths.
Paths are yielded in reverse order, so that users can add a services module that will override
the built-in main service module path (e.g. in case a type defined in the built-in module was redefined)
"""
for path in reversed(self._extra_services_modules):
yield path
yield self._main_services_module
| ExtApiServiceProxy |
python | pydantic__pydantic | pydantic/_internal/_decorators.py | {
"start": 14910,
"end": 33950
} | class ____:
"""Mapping of name in the class namespace to decorator info.
Note that the name in the class namespace is the function or attribute name,
not the field name!
"""
validators: dict[str, Decorator[ValidatorDecoratorInfo]] = field(default_factory=dict)
field_validators: dict[str, Decorator[FieldValidatorDecoratorInfo]] = field(default_factory=dict)
root_validators: dict[str, Decorator[RootValidatorDecoratorInfo]] = field(default_factory=dict)
field_serializers: dict[str, Decorator[FieldSerializerDecoratorInfo]] = field(default_factory=dict)
model_serializers: dict[str, Decorator[ModelSerializerDecoratorInfo]] = field(default_factory=dict)
model_validators: dict[str, Decorator[ModelValidatorDecoratorInfo]] = field(default_factory=dict)
computed_fields: dict[str, Decorator[ComputedFieldInfo]] = field(default_factory=dict)
@classmethod
def build(
cls,
typ: type[Any],
# Default to `True` for backwards compatibility:
replace_wrapped_methods: bool = True,
) -> Self:
"""Build a `DecoratorInfos` instance for the given model, dataclass or `TypedDict` type.
Decorators from parent classes are included, including "bare" classes (e.g. if `typ`
is a Pydantic model, non Pydantic parent model classes are also taken into account).
The collection of the decorators happens by respecting the MRO.
If one of the bases has an `__pydantic_decorators__` attribute set, it is assumed to be
a `DecoratorInfos` instance and is used as-is. The `__pydantic_decorators__` attribute
is *not* being set on the provided `typ`.
Args:
typ: The model, dataclass or `TypedDict` type to use when building the `DecoratorInfos` instance.
replace_wrapped_methods: Whether to replace the decorator's wrapped methods on `typ`.
This is useful e.g. for field validators which are initially class methods. This should
only be set to `True` if `typ` is a Pydantic model or dataclass (otherwise this results
in mutations of classes Pydantic doesn't "own").
"""
# reminder: dicts are ordered and replacement does not alter the order
res = cls()
# Iterate over the bases, without the actual `typ`.
# `1:-1` because we don't need to include `object`/`TypedDict`:
for base in reversed(mro(typ)[1:-1]):
existing: DecoratorInfos | None = base.__dict__.get('__pydantic_decorators__')
if existing is None:
existing, _ = _decorator_infos_for_class(base, collect_to_replace=False)
res.validators.update({k: v.bind_to_cls(typ) for k, v in existing.validators.items()})
res.field_validators.update({k: v.bind_to_cls(typ) for k, v in existing.field_validators.items()})
res.root_validators.update({k: v.bind_to_cls(typ) for k, v in existing.root_validators.items()})
res.field_serializers.update({k: v.bind_to_cls(typ) for k, v in existing.field_serializers.items()})
res.model_serializers.update({k: v.bind_to_cls(typ) for k, v in existing.model_serializers.items()})
res.model_validators.update({k: v.bind_to_cls(typ) for k, v in existing.model_validators.items()})
res.computed_fields.update({k: v.bind_to_cls(typ) for k, v in existing.computed_fields.items()})
decorator_infos, to_replace = _decorator_infos_for_class(typ, collect_to_replace=True)
res.validators.update(decorator_infos.validators)
res.field_validators.update(decorator_infos.field_validators)
res.root_validators.update(decorator_infos.root_validators)
res.field_serializers.update(decorator_infos.field_serializers)
res.model_serializers.update(decorator_infos.model_serializers)
res.model_validators.update(decorator_infos.model_validators)
res.computed_fields.update(decorator_infos.computed_fields)
if replace_wrapped_methods and to_replace:
for name, value in to_replace:
setattr(typ, name, value)
res._validate()
return res
def _validate(self) -> None:
seen: set[str] = set()
for field_ser in self.field_serializers.values():
for f_name in field_ser.info.fields:
if f_name in seen:
raise PydanticUserError(
f'Multiple field serializer functions were defined for field {f_name!r}, this is not allowed.',
code='multiple-field-serializers',
)
seen.add(f_name)
def update_from_config(self, config_wrapper: ConfigWrapper) -> None:
"""Update the decorator infos from the configuration of the class they are attached to."""
for name, computed_field_dec in self.computed_fields.items():
computed_field_dec.info._update_from_config(config_wrapper, name)
def _decorator_infos_for_class(
typ: type[Any],
*,
collect_to_replace: bool,
) -> tuple[DecoratorInfos, list[tuple[str, Any]]]:
"""Collect a `DecoratorInfos` for class, without looking into bases."""
res = DecoratorInfos()
to_replace: list[tuple[str, Any]] = []
for var_name, var_value in vars(typ).items():
if isinstance(var_value, PydanticDescriptorProxy):
info = var_value.decorator_info
if isinstance(info, ValidatorDecoratorInfo):
res.validators[var_name] = Decorator.build(typ, cls_var_name=var_name, shim=var_value.shim, info=info)
elif isinstance(info, FieldValidatorDecoratorInfo):
res.field_validators[var_name] = Decorator.build(
typ, cls_var_name=var_name, shim=var_value.shim, info=info
)
elif isinstance(info, RootValidatorDecoratorInfo):
res.root_validators[var_name] = Decorator.build(
typ, cls_var_name=var_name, shim=var_value.shim, info=info
)
elif isinstance(info, FieldSerializerDecoratorInfo):
res.field_serializers[var_name] = Decorator.build(
typ, cls_var_name=var_name, shim=var_value.shim, info=info
)
elif isinstance(info, ModelValidatorDecoratorInfo):
res.model_validators[var_name] = Decorator.build(
typ, cls_var_name=var_name, shim=var_value.shim, info=info
)
elif isinstance(info, ModelSerializerDecoratorInfo):
res.model_serializers[var_name] = Decorator.build(
typ, cls_var_name=var_name, shim=var_value.shim, info=info
)
else:
from ..fields import ComputedFieldInfo
isinstance(var_value, ComputedFieldInfo)
res.computed_fields[var_name] = Decorator.build(typ, cls_var_name=var_name, shim=None, info=info)
if collect_to_replace:
to_replace.append((var_name, var_value.wrapped))
return res, to_replace
def inspect_validator(
validator: Callable[..., Any], *, mode: FieldValidatorModes, type: Literal['field', 'model']
) -> bool:
"""Look at a field or model validator function and determine whether it takes an info argument.
An error is raised if the function has an invalid signature.
Args:
validator: The validator function to inspect.
mode: The proposed validator mode.
type: The type of validator, either 'field' or 'model'.
Returns:
Whether the validator takes an info argument.
"""
try:
sig = _signature_no_eval(validator)
except (ValueError, TypeError):
# `inspect.signature` might not be able to infer a signature, e.g. with C objects.
# In this case, we assume no info argument is present:
return False
n_positional = count_positional_required_params(sig)
if mode == 'wrap':
if n_positional == 3:
return True
elif n_positional == 2:
return False
else:
assert mode in {'before', 'after', 'plain'}, f"invalid mode: {mode!r}, expected 'before', 'after' or 'plain'"
if n_positional == 2:
return True
elif n_positional == 1:
return False
raise PydanticUserError(
f'Unrecognized {type} validator function signature for {validator} with `mode={mode}`: {sig}',
code='validator-signature',
)
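# Editor's illustrative examples (assumed signatures, not from the original source):
# a validator with one required positional parameter, e.g. `def v(value): ...`, makes
# inspect_validator return False for mode 'before'/'after'/'plain', while
# `def v(value, info): ...` returns True; for mode='wrap', `def v(value, handler): ...`
# returns False and `def v(value, handler, info): ...` returns True.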
def inspect_field_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> tuple[bool, bool]:
"""Look at a field serializer function and determine if it is a field serializer,
and whether it takes an info argument.
An error is raised if the function has an invalid signature.
Args:
serializer: The serializer function to inspect.
mode: The serializer mode, either 'plain' or 'wrap'.
Returns:
Tuple of (is_field_serializer, info_arg).
"""
try:
sig = _signature_no_eval(serializer)
except (ValueError, TypeError):
# `inspect.signature` might not be able to infer a signature, e.g. with C objects.
# In this case, we assume no info argument is present and this is not a method:
return (False, False)
first = next(iter(sig.parameters.values()), None)
is_field_serializer = first is not None and first.name == 'self'
n_positional = count_positional_required_params(sig)
if is_field_serializer:
# -1 to correct for self parameter
info_arg = _serializer_info_arg(mode, n_positional - 1)
else:
info_arg = _serializer_info_arg(mode, n_positional)
if info_arg is None:
raise PydanticUserError(
f'Unrecognized field_serializer function signature for {serializer} with `mode={mode}`:{sig}',
code='field-serializer-signature',
)
return is_field_serializer, info_arg
def inspect_annotated_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> bool:
"""Look at a serializer function used via `Annotated` and determine whether it takes an info argument.
An error is raised if the function has an invalid signature.
Args:
serializer: The serializer function to check.
mode: The serializer mode, either 'plain' or 'wrap'.
Returns:
info_arg
"""
try:
sig = _signature_no_eval(serializer)
except (ValueError, TypeError):
# `inspect.signature` might not be able to infer a signature, e.g. with C objects.
# In this case, we assume no info argument is present:
return False
info_arg = _serializer_info_arg(mode, count_positional_required_params(sig))
if info_arg is None:
raise PydanticUserError(
f'Unrecognized field_serializer function signature for {serializer} with `mode={mode}`:{sig}',
code='field-serializer-signature',
)
else:
return info_arg
def inspect_model_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> bool:
"""Look at a model serializer function and determine whether it takes an info argument.
An error is raised if the function has an invalid signature.
Args:
serializer: The serializer function to check.
mode: The serializer mode, either 'plain' or 'wrap'.
Returns:
`info_arg` - whether the function expects an info argument.
"""
if isinstance(serializer, (staticmethod, classmethod)) or not is_instance_method_from_sig(serializer):
raise PydanticUserError(
'`@model_serializer` must be applied to instance methods', code='model-serializer-instance-method'
)
sig = _signature_no_eval(serializer)
info_arg = _serializer_info_arg(mode, count_positional_required_params(sig))
if info_arg is None:
raise PydanticUserError(
f'Unrecognized model_serializer function signature for {serializer} with `mode={mode}`:{sig}',
code='model-serializer-signature',
)
else:
return info_arg
def _serializer_info_arg(mode: Literal['plain', 'wrap'], n_positional: int) -> bool | None:
if mode == 'plain':
if n_positional == 1:
# (input_value: Any, /) -> Any
return False
elif n_positional == 2:
# (model: Any, input_value: Any, /) -> Any
return True
else:
assert mode == 'wrap', f"invalid mode: {mode!r}, expected 'plain' or 'wrap'"
if n_positional == 2:
# (input_value: Any, serializer: SerializerFunctionWrapHandler, /) -> Any
return False
elif n_positional == 3:
# (input_value: Any, serializer: SerializerFunctionWrapHandler, info: SerializationInfo, /) -> Any
return True
return None
AnyDecoratorCallable: TypeAlias = (
'Union[classmethod[Any, Any, Any], staticmethod[Any, Any], partialmethod[Any], Callable[..., Any]]'
)
def is_instance_method_from_sig(function: AnyDecoratorCallable) -> bool:
"""Whether the function is an instance method.
It will consider a function as instance method if the first parameter of
function is `self`.
Args:
function: The function to check.
Returns:
`True` if the function is an instance method, `False` otherwise.
"""
sig = _signature_no_eval(unwrap_wrapped_function(function))
first = next(iter(sig.parameters.values()), None)
if first and first.name == 'self':
return True
return False
def ensure_classmethod_based_on_signature(function: AnyDecoratorCallable) -> Any:
"""Apply the `@classmethod` decorator on the function.
Args:
function: The function to apply the decorator on.
Return:
The `@classmethod` decorator applied function.
"""
if not isinstance(
unwrap_wrapped_function(function, unwrap_class_static_method=False), classmethod
) and _is_classmethod_from_sig(function):
return classmethod(function) # type: ignore[arg-type]
return function
def _is_classmethod_from_sig(function: AnyDecoratorCallable) -> bool:
sig = _signature_no_eval(unwrap_wrapped_function(function))
first = next(iter(sig.parameters.values()), None)
if first and first.name == 'cls':
return True
return False
def unwrap_wrapped_function(
func: Any,
*,
unwrap_partial: bool = True,
unwrap_class_static_method: bool = True,
) -> Any:
"""Recursively unwraps a wrapped function until the underlying function is reached.
This handles property, functools.partial, functools.partialmethod, staticmethod, and classmethod.
Args:
func: The function to unwrap.
unwrap_partial: If True (default), unwrap partial and partialmethod decorators.
unwrap_class_static_method: If True (default), also unwrap classmethod and staticmethod
decorators. If False, only unwrap partial and partialmethod decorators.
Returns:
The underlying function of the wrapped function.
"""
# Define the types we want to check against as a single tuple.
unwrap_types = (
(property, cached_property)
+ ((partial, partialmethod) if unwrap_partial else ())
+ ((staticmethod, classmethod) if unwrap_class_static_method else ())
)
while isinstance(func, unwrap_types):
if unwrap_class_static_method and isinstance(func, (classmethod, staticmethod)):
func = func.__func__
elif isinstance(func, (partial, partialmethod)):
func = func.func
elif isinstance(func, property):
func = func.fget # arbitrary choice, convenient for computed fields
else:
# Make coverage happy as it can only get here in the last possible case
assert isinstance(func, cached_property)
func = func.func # type: ignore
return func
_function_like = (
partial,
partialmethod,
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
types.WrapperDescriptorType,
types.MethodWrapperType,
types.MemberDescriptorType,
)
def get_callable_return_type(
callable_obj: Any,
globalns: GlobalsNamespace | None = None,
localns: MappingNamespace | None = None,
) -> Any | PydanticUndefinedType:
"""Get the callable return type.
Args:
callable_obj: The callable to analyze.
globalns: The globals namespace to use during type annotation evaluation.
localns: The locals namespace to use during type annotation evaluation.
Returns:
The function return type.
"""
if isinstance(callable_obj, type):
# types are callables, and we assume the return type
# is the type itself (e.g. `int()` results in an instance of `int`).
return callable_obj
if not isinstance(callable_obj, _function_like):
call_func = getattr(type(callable_obj), '__call__', None) # noqa: B004
if call_func is not None:
callable_obj = call_func
hints = get_function_type_hints(
unwrap_wrapped_function(callable_obj),
include_keys={'return'},
globalns=globalns,
localns=localns,
)
return hints.get('return', PydanticUndefined)
def count_positional_required_params(sig: Signature) -> int:
"""Get the number of positional (required) arguments of a signature.
This function should only be used to inspect signatures of validation and serialization functions.
The first argument (the value being serialized or validated) is counted as a required argument
even if a default value exists.
Returns:
The number of positional arguments of a signature.
"""
parameters = list(sig.parameters.values())
return sum(
1
for param in parameters
if can_be_positional(param)
# First argument is the value being validated/serialized, and can have a default value
# (e.g. `float`, which has signature `(x=0, /)`). We assume other parameters (the info arg
# for instance) should be required, and thus without any default value.
and (param.default is Parameter.empty or param is parameters[0])
)
def ensure_property(f: Any) -> Any:
"""Ensure that a function is a `property` or `cached_property`, or is a valid descriptor.
Args:
f: The function to check.
Returns:
The function, or a `property` or `cached_property` instance wrapping the function.
"""
if ismethoddescriptor(f) or isdatadescriptor(f):
return f
else:
return property(f)
def _signature_no_eval(f: Callable[..., Any]) -> Signature:
"""Get the signature of a callable without evaluating any annotations."""
if sys.version_info >= (3, 14):
from annotationlib import Format
return signature(f, annotation_format=Format.FORWARDREF)
else:
return signature(f)
| DecoratorInfos |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 298224,
"end": 299044
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of SetOrganizationInteractionLimit"""
__schema__ = github_schema
__field_names__ = ("organization_id", "limit", "expiry", "client_mutation_id")
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization to set a limit for."""
limit = sgqlc.types.Field(sgqlc.types.non_null(RepositoryInteractionLimit), graphql_name="limit")
"""The limit to set."""
expiry = sgqlc.types.Field(RepositoryInteractionLimitExpiry, graphql_name="expiry")
"""When this limit should expire."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| SetOrganizationInteractionLimitInput |
python | pypa__pip | src/pip/_vendor/packaging/_elffile.py | {
"start": 673,
"end": 3286
} | class ____:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error as e:
raise ELFInvalid("unable to parse identification") from e
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError as e:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or encoding ({self.encoding})"
) from e
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
self._e_phentsize, # Size of section.
self._e_phnum, # Number of sections.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> str | None:
"""
The path recorded in the ``PT_INTERP`` section header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
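# Editor's usage sketch (assumed environment, not part of the vendored module):
#   import sys
#   with open(sys.executable, "rb") as f:
#       try:
#           print(ELFFile(f).interpreter)  # e.g. "/lib64/ld-linux-x86-64.so.2" on glibc Linux
#       except ELFInvalid:
#           pass  # not an ELF binary (macOS, Windows, ...)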
| ELFFile |
python | huggingface__transformers | src/transformers/models/nllb_moe/modeling_nllb_moe.py | {
"start": 29855,
"end": 30396
} | class ____(PreTrainedModel):
config: NllbMoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"]
# TODO: If anyone is up to it to make sure tests pass etc
# Flash attention has problems due to not preparing masks the same way as eager/sdpa
# SDPA has more flaky logits which requires more time to look into tests
_supports_flash_attn = False
_supports_sdpa = False
_supports_flex_attn = False
| NllbMoePreTrainedModel |
python | django-extensions__django-extensions | tests/auth/test_mixins.py | {
"start": 657,
"end": 1604
} | class ____(TestCase):
factory = RequestFactory()
User = get_user_model()
@classmethod
def setUpTestData(cls):
cls.user = cls.User.objects.create(username="Joe", password="pass")
cls.ownerModel = HasOwnerModel.objects.create(owner=cls.user)
# Test if owner model has access
def test_permission_pass(self):
request = self.factory.get("/permission-required/" + str(self.ownerModel.id))
request.user = self.user
resp = OwnerView.as_view()(request)
self.assertEqual(resp.status_code, 200)
# Test if non owner model is redirected
def test_permission_denied_and_redirect(self):
request = self.factory.get("/permission-required/" + str(self.ownerModel.id))
request.user = AnonymousUser()
resp = OwnerView.as_view()(request)
self.assertRaises(PermissionDenied)
self.assertEqual(resp.status_code, 302)
| ModelUserFieldPermissionMixinTests |
python | RaRe-Technologies__gensim | gensim/models/word2vec.py | {
"start": 101920,
"end": 107465
} | class ____(namedtuple('Heapitem', 'count, index, left, right')):
def __lt__(self, other):
return self.count < other.count
def _build_heap(wv):
heap = list(Heapitem(wv.get_vecattr(i, 'count'), i, None, None) for i in range(len(wv.index_to_key)))
heapq.heapify(heap)
for i in range(len(wv) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(
heap, Heapitem(count=min1.count + min2.count, index=i + len(wv), left=min1, right=min2)
)
return heap
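# Editor's worked example (assumed counts, not from the original source): for three
# vocabulary words with counts 5, 3 and 2, the first iteration merges the two rarest
# (2 and 3) into an inner node of count 5, and the second merges the two count-5 items
# into the root of count 10; _assign_binary_codes then walks this tree, giving the most
# frequent word a 1-bit code and the two rarer words 2-bit codes.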
def _assign_binary_codes(wv):
"""
Appends a binary code to each vocab term.
Parameters
----------
wv : KeyedVectors
A collection of word-vectors.
Sets the .code and .point attributes of each node.
Each code is a numpy.array containing 0s and 1s.
Each point is a numpy.array of integers (the indices of the inner tree nodes on the path from the root).
"""
logger.info("constructing a huffman tree from %i words", len(wv))
heap = _build_heap(wv)
if not heap:
#
# TODO: how can we end up with an empty heap?
#
logger.info("built huffman tree with maximum node depth 0")
return
# recurse over the tree, assigning a binary code to each vocabulary word
max_depth = 0
stack = [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node[1] < len(wv): # node[1] = index
# leaf node => store its path from the root
k = node[1]
wv.set_vecattr(k, 'code', codes)
wv.set_vecattr(k, 'point', points)
# node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = np.array(list(points) + [node.index - len(wv)], dtype=np.uint32)
stack.append((node.left, np.array(list(codes) + [0], dtype=np.uint8), points))
stack.append((node.right, np.array(list(codes) + [1], dtype=np.uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 \
# -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO
)
logger.info("running %s", " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # noqa:F811 avoid referencing __main__ in pickle
np.seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument(
"-sample",
help="Set threshold for occurrence of words. "
"Those that appear with higher frequency in the training data will be randomly down-sampled;"
" default is 1e-3, useful range is (0, 1e-5)",
type=float, default=1e-3
)
parser.add_argument(
"-hs", help="Use Hierarchical Softmax; default is 0 (not used)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument(
"-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)",
type=int, default=5
)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument(
"-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5",
type=int, default=5
)
parser.add_argument(
"-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)",
type=int, default=1, choices=[0, 1]
)
parser.add_argument(
"-binary", help="Save the resulting vectors in binary mode; default is 0 (off)",
type=int, default=0, choices=[0, 1]
)
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, vector_size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, epochs=args.iter,
)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
| Heapitem |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/providers.py | {
"start": 36464,
"end": 40483
} | class ____(PrimitiveProvider):
lifetime = "test_case"
def __init__(
self, conjecturedata: Optional["ConjectureData"], /, *, bytestring: bytes
):
super().__init__(conjecturedata)
self.bytestring = bytestring
self.index = 0
self.drawn = bytearray()
def _draw_bits(self, n):
if n == 0: # pragma: no cover
return 0
n_bytes = bits_to_bytes(n)
if self.index + n_bytes > len(self.bytestring):
self._cd.mark_overrun()
buf = bytearray(self.bytestring[self.index : self.index + n_bytes])
self.index += n_bytes
buf[0] &= BYTE_MASKS[n % 8]
buf = bytes(buf)
self.drawn += buf
return int_from_bytes(buf)
def draw_boolean(
self,
p: float = 0.5,
) -> bool:
if p <= 0:
return False
if p >= 1:
return True
# always use one byte for booleans to maintain constant draw size.
# If a probability requires more than 8 bits to represent precisely,
# the result will be slightly biased, but not badly.
bits = 8
size = 2**bits
# always leave at least one value that can be true, even for very small
# p.
falsey = max(1, math.floor(size * (1 - p)))
n = self._draw_bits(bits)
return n >= falsey
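# Editor's worked example (assumed p, not from the original source): with p=0.25,
# size is 256 and falsey = floor(256 * 0.75) = 192, so the draw is True for the 64
# byte values >= 192, i.e. with probability 64/256 = 0.25 as intended.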
def draw_integer(
self,
min_value: int | None = None,
max_value: int | None = None,
*,
weights: dict[int, float] | None = None,
shrink_towards: int = 0,
) -> int:
assert self._cd is not None
# we explicitly ignore integer weights for now, as they are likely net
# negative on fuzzer performance.
if min_value is None and max_value is None:
min_value = -(2**127)
max_value = 2**127 - 1
elif min_value is None:
assert max_value is not None
min_value = max_value - 2**64
elif max_value is None:
assert min_value is not None
max_value = min_value + 2**64
if min_value == max_value:
return min_value
bits = (max_value - min_value).bit_length()
value = self._draw_bits(bits)
while not (min_value <= value <= max_value):
value = self._draw_bits(bits)
return value
def draw_float(
self,
*,
min_value: float = -math.inf,
max_value: float = math.inf,
allow_nan: bool = True,
smallest_nonzero_magnitude: float,
) -> float:
n = self._draw_bits(64)
sign = -1 if n >> 64 else 1
f = sign * lex_to_float(n & ((1 << 64) - 1))
clamper = make_float_clamper(
min_value,
max_value,
smallest_nonzero_magnitude=smallest_nonzero_magnitude,
allow_nan=allow_nan,
)
return clamper(f)
def _draw_collection(self, min_size, max_size, *, alphabet_size):
average_size = min(
max(min_size * 2, min_size + 5),
0.5 * (min_size + max_size),
)
elements = many(
self._cd,
min_size=min_size,
max_size=max_size,
average_size=average_size,
observe=False,
)
values = []
while elements.more():
values.append(self.draw_integer(0, alphabet_size - 1))
return values
def draw_string(
self,
intervals: IntervalSet,
*,
min_size: int = 0,
max_size: int = COLLECTION_DEFAULT_MAX_SIZE,
) -> str:
values = self._draw_collection(min_size, max_size, alphabet_size=len(intervals))
return "".join(chr(intervals[v]) for v in values)
def draw_bytes(
self,
min_size: int = 0,
max_size: int = COLLECTION_DEFAULT_MAX_SIZE,
) -> bytes:
values = self._draw_collection(min_size, max_size, alphabet_size=2**8)
return bytes(values)
| BytestringProvider |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 948,
"end": 1388
} | class ____(TypedDict, total=False):
durationP50: float | None
durationP90: float | None
crashFreeUsers: float | None
crashFreeSessions: float | None
totalUsers: int | None
totalUsers24h: int | None
totalProjectUsers24h: int | None
totalSessions: int | None
totalSessions24h: int | None
totalProjectSessions24h: int | None
adoption: float | None
sessionsAdoption: float | None
| HealthDataOptional |
python | Netflix__metaflow | metaflow/plugins/cards/card_client.py | {
"start": 501,
"end": 3903
} | class ____:
"""
`Card` represents an individual Metaflow Card, a single HTML file, produced by
the card `@card` decorator. `Card`s are contained by `CardContainer`, returned by
`get_cards`.
Note that the contents of the card, an HTML file, are retrieved lazily when you call
`Card.get` for the first time or when the card is rendered in a notebook.
"""
def __init__(
self,
card_ds,
type,
path,
hash,
id=None,
html=None,
created_on=None,
from_resumed=False,
origin_pathspec=None,
):
# private attributes
self._path = path
self._html = html
self._created_on = created_on
self._card_ds = card_ds
self._card_id = id
self._data_path = None
# public attributes
self.hash = hash
self.type = type
self.from_resumed = from_resumed
self.origin_pathspec = origin_pathspec
# Tempfile to open stuff in browser
self._temp_file = None
def get_data(self) -> Optional[dict]:
# currently an internal method to retrieve a card's data.
if self._data_path is None:
data_paths = self._card_ds.extract_data_paths(
card_type=self.type, card_hash=self.hash, card_id=self._card_id
)
if len(data_paths) == 0:
return None
self._data_path = data_paths[0]
return self._card_ds.get_card_data(self._data_path)
def get(self) -> str:
"""
Retrieves the HTML contents of the card from the
Metaflow datastore.
Returns
-------
str
HTML contents of the card.
"""
if self._html is not None:
return self._html
self._html = self._card_ds.get_card_html(self.path)
return self._html
@property
def path(self) -> str:
"""
The path of the card in the datastore which uniquely
identifies the card.
Returns
-------
str
Path to the card
"""
return self._path
@property
def id(self) -> Optional[str]:
"""
The ID of the card, if specified with `@card(id=ID)`.
Returns
-------
Optional[str]
ID of the card
"""
return self._card_id
def __str__(self):
return "<Card at '%s'>" % self._path
def view(self) -> None:
"""
Opens the card in a local web browser.
This call uses Python's built-in [`webbrowser`](https://docs.python.org/3/library/webbrowser.html)
module to open the card.
"""
import webbrowser
self._temp_file = tempfile.NamedTemporaryFile(suffix=".html")
html = self.get()
self._temp_file.write(html.encode())
self._temp_file.seek(0)
url = "file://" + os.path.abspath(self._temp_file.name)
webbrowser.open(url)
def _repr_html_(self):
main_html = []
container_id = uuid.uuid4()
main_html.append(
"<script type='text/javascript'>var mfContainerId = '%s';</script>"
% container_id
)
main_html.append(
"<div class='embed' data-container='%s'>%s</div>"
% (container_id, self.get())
)
return "\n".join(main_html)
| Card |
python | tiangolo__fastapi | tests/test_security_api_key_cookie.py | {
"start": 217,
"end": 2004
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: str = Security(api_key)):
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
return current_user
def test_security_api_key():
client = TestClient(app, cookies={"key": "secret"})
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"username": "secret"}
def test_security_api_key_no_key():
client = TestClient(app)
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "APIKey"
def test_openapi_schema():
client = TestClient(app)
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyCookie": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyCookie": {"type": "apiKey", "name": "key", "in": "cookie"}
}
},
}
| User |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 52042,
"end": 64822
} | class ____(URLFetchStrategy):
"""Fetch strategy that verifies the content digest during fetching,
as well as after expanding it."""
def __init__(self, url, archive_sha256: str, expanded_sha256: str):
super().__init__(url=url, checksum=archive_sha256)
self.expanded_sha256 = expanded_sha256
def expand(self):
"""Verify checksum after expanding the archive."""
# Expand the archive
super().expand()
# Ensure a single patch file.
src_dir = self.stage.source_path
files = os.listdir(src_dir)
if len(files) != 1:
raise ChecksumError(self, f"Expected a single file in {src_dir}.")
verify_checksum(
os.path.join(src_dir, files[0]), self.expanded_sha256, self.url, self._effective_url
)
def verify_checksum(file: str, digest: str, url: str, effective_url: Optional[str]) -> None:
checker = crypto.Checker(digest)
if not checker.check(file):
# On failure, provide some information about the file size and
# contents, so that we can quickly see what the issue is (redirect
# was not followed, empty file, text instead of binary, ...)
size, contents = fs.filesummary(file)
long_msg = (
f"Expected {digest} but got {checker.sum}. "
f"File size = {size} bytes. Contents = {contents!r}. "
f"URL = {url}"
)
if effective_url and effective_url != url:
long_msg += f", redirected to = {effective_url}"
raise ChecksumError(f"{checker.hash_name} checksum failed for {file}", long_msg)
def stable_target(fetcher):
"""Returns whether the fetcher target is expected to have a stable
checksum. This is only true if the target is a preexisting archive
file."""
if isinstance(fetcher, URLFetchStrategy) and fetcher.cachable:
return True
return False
def from_url(url: str) -> URLFetchStrategy:
"""Given a URL, find an appropriate fetch strategy for it.
Currently just gives you a URLFetchStrategy that uses curl.
TODO: make this return appropriate fetch strategies for other types of URLs.
"""
return URLFetchStrategy(url=url)
def from_kwargs(**kwargs) -> FetchStrategy:
"""Construct an appropriate FetchStrategy from the given keyword arguments.
Args:
**kwargs: dictionary of keyword arguments, e.g. from a ``version()`` directive in a
package.
Returns:
The fetch strategy that matches the args, based on attribute names (e.g., ``git``, ``hg``,
etc.)
Raises:
spack.error.FetchError: If no ``fetch_strategy`` matches the args.
"""
for fetcher in all_strategies:
if fetcher.matches(kwargs):
return fetcher(**kwargs)
raise InvalidArgsError(**kwargs)
def check_pkg_attributes(pkg):
"""Find ambiguous top-level fetch attributes in a package.
Currently this only ensures that two or more VCS fetch strategies are
not specified at once.
"""
# a single package cannot have URL attributes for multiple VCS fetch
# strategies *unless* they are the same attribute.
conflicts = set([s.url_attr for s in all_strategies if hasattr(pkg, s.url_attr)])
# URL isn't a VCS fetch method. We can use it with a VCS method.
conflicts -= set(["url"])
if len(conflicts) > 1:
raise FetcherConflict(
"Package %s cannot specify %s together. Pick at most one."
% (pkg.name, comma_and(quote(conflicts)))
)
def _check_version_attributes(fetcher, pkg, version):
"""Ensure that the fetcher for a version is not ambiguous.
This assumes that we have already determined the fetcher for the
specific version using ``for_package_version()``
"""
all_optionals = set(a for s in all_strategies for a in s.optional_attrs)
args = pkg.versions[version]
extra = set(args) - set(fetcher.optional_attrs) - set([fetcher.url_attr, "no_cache"])
extra.intersection_update(all_optionals)
if extra:
legal_attrs = [fetcher.url_attr] + list(fetcher.optional_attrs)
raise FetcherConflict(
"%s version '%s' has extra arguments: %s"
% (pkg.name, version, comma_and(quote(extra))),
"Valid arguments for a %s fetcher are: \n %s"
% (fetcher.url_attr, comma_and(quote(legal_attrs))),
)
def _extrapolate(pkg, version):
"""Create a fetcher from an extrapolated URL for this version."""
try:
return URLFetchStrategy(url=pkg.url_for_version(version), fetch_options=pkg.fetch_options)
except spack.error.NoURLError:
raise ExtrapolationError(
f"Can't extrapolate a URL for version {version} because "
f"package {pkg.name} defines no URLs"
)
def _from_merged_attrs(fetcher, pkg, version):
"""Create a fetcher from merged package and version attributes."""
if fetcher.url_attr == "url":
mirrors = pkg.all_urls_for_version(version)
url = mirrors[0]
mirrors = mirrors[1:]
attrs = {fetcher.url_attr: url, "mirrors": mirrors}
else:
url = getattr(pkg, fetcher.url_attr)
attrs = {fetcher.url_attr: url}
attrs["fetch_options"] = pkg.fetch_options
attrs.update(pkg.versions[version])
if fetcher.url_attr == "git":
pkg_attr_list = ["submodules", "git_sparse_paths"]
for pkg_attr in pkg_attr_list:
if hasattr(pkg, pkg_attr):
attrs.setdefault(pkg_attr, getattr(pkg, pkg_attr))
return fetcher(**attrs)
def for_package_version(pkg, version=None):
saved_versions = None
if version is not None:
saved_versions = pkg.spec.versions
try:
return _for_package_version(pkg, version)
finally:
if saved_versions is not None:
pkg.spec.versions = saved_versions
def _for_package_version(pkg, version=None):
"""Determine a fetch strategy based on the arguments supplied to
version() in the package description."""
# No-code packages have a custom fetch strategy to work around issues
# with resource staging.
if not pkg.has_code:
return BundleFetchStrategy()
check_pkg_attributes(pkg)
if version is not None:
assert not pkg.spec.concrete, "concrete specs should not pass the 'version=' argument"
# Specs are initialized with the universe range, if no version information is given,
# so here we make sure we always match the version passed as argument
if not isinstance(version, spack.version.StandardVersion):
version = spack.version.Version(version)
version_list = spack.version.VersionList()
version_list.add(version)
pkg.spec.versions = version_list
else:
version = pkg.version
# if it's a commit, we must use a GitFetchStrategy
commit_sha = pkg.spec.variants.get("commit", None)
if isinstance(version, spack.version.GitVersion) or commit_sha:
if not hasattr(pkg, "git"):
raise spack.error.FetchError(
f"Cannot fetch git version for {pkg.name}. Package has no 'git' attribute"
)
# Populate the version with comparisons to other commits
if isinstance(version, spack.version.GitVersion):
from spack.version.git_ref_lookup import GitRefLookup
version.attach_lookup(GitRefLookup(pkg.name))
# For GitVersion, we have no way to determine whether a ref is a branch or tag
# Fortunately, we handle branches and tags identically, except tags are
# handled slightly more conservatively for older versions of git.
# We call all non-commit refs tags in this context, at the cost of a slight
# performance hit for branches on older versions of git.
# Branches cannot be cached, so we tell the fetcher not to cache tags/branches
# TODO(psakiev) eventually we should only need to clone based on the commit
ref_type = None
ref_value = None
if commit_sha:
ref_type = "commit"
ref_value = commit_sha.value
else:
ref_type = "commit" if version.is_commit else "tag"
ref_value = version.ref
kwargs = {ref_type: ref_value, "no_cache": ref_type != "commit"}
kwargs["git"] = pkg.version_or_package_attr("git", version)
kwargs["submodules"] = pkg.version_or_package_attr("submodules", version, False)
kwargs["git_sparse_paths"] = pkg.version_or_package_attr("git_sparse_paths", version, None)
# if the ref_version is a known version from the package, use that version's
# attributes
ref_version = getattr(pkg.version, "ref_version", None)
if ref_version:
kwargs["git"] = pkg.version_or_package_attr("git", ref_version)
kwargs["submodules"] = pkg.version_or_package_attr("submodules", ref_version, False)
fetcher = GitFetchStrategy(**kwargs)
return fetcher
# If it's not a known version, try to extrapolate one by URL
if version not in pkg.versions:
return _extrapolate(pkg, version)
# Set package args first so version args can override them
args = {"fetch_options": pkg.fetch_options}
# Grab a dict of args out of the package version dict
args.update(pkg.versions[version])
# If the version specifies a `url_attr` directly, use that.
for fetcher in all_strategies:
if fetcher.url_attr in args:
_check_version_attributes(fetcher, pkg, version)
if fetcher.url_attr == "git" and hasattr(pkg, "submodules"):
args.setdefault("submodules", pkg.submodules)
return fetcher(**args)
# if a version's optional attributes imply a particular fetch
# strategy, and we have the `url_attr`, then use that strategy.
for fetcher in all_strategies:
if hasattr(pkg, fetcher.url_attr) or fetcher.url_attr == "url":
optionals = fetcher.optional_attrs
if optionals and any(a in args for a in optionals):
_check_version_attributes(fetcher, pkg, version)
return _from_merged_attrs(fetcher, pkg, version)
# if the optional attributes tell us nothing, then use any `url_attr`
# on the package. This prefers URL vs. VCS, b/c URLFetchStrategy is
# defined first in this file.
for fetcher in all_strategies:
if hasattr(pkg, fetcher.url_attr):
_check_version_attributes(fetcher, pkg, version)
return _from_merged_attrs(fetcher, pkg, version)
raise InvalidArgsError(pkg, version, **args)
def from_url_scheme(url: str, **kwargs) -> FetchStrategy:
"""Finds a suitable FetchStrategy by matching its url_attr with the scheme
in the given url."""
parsed_url = urllib.parse.urlparse(url, scheme="file")
scheme_mapping = kwargs.get("scheme_mapping") or {
"file": "url",
"http": "url",
"https": "url",
"ftp": "url",
"ftps": "url",
}
scheme = parsed_url.scheme
scheme = scheme_mapping.get(scheme, scheme)
for fetcher in all_strategies:
url_attr = getattr(fetcher, "url_attr", None)
if url_attr and url_attr == scheme:
return fetcher(url=url, **kwargs)
raise ValueError(f'No FetchStrategy found for url with scheme: "{parsed_url.scheme}"')
def from_list_url(pkg):
"""If a package provides a URL which lists URLs for resources by
version, this can create a fetcher for a URL discovered for
the specified package's version."""
if pkg.list_url:
try:
versions = pkg.fetch_remote_versions()
try:
# get a URL, and a checksum if we have it
url_from_list = versions[pkg.version]
checksum = None
# try to find a known checksum for version, from the package
version = pkg.version
if version in pkg.versions:
args = pkg.versions[version]
checksum = next(
(v for k, v in args.items() if k in crypto.hashes), args.get("checksum")
)
# construct a fetcher
return URLFetchStrategy(
url=url_from_list, checksum=checksum, fetch_options=pkg.fetch_options
)
except KeyError as e:
tty.debug(e)
tty.msg("Cannot find version %s in url_list" % pkg.version)
except BaseException as e:
# TODO: Don't catch BaseException here! Be more specific.
tty.debug(e)
tty.msg("Could not determine url from list_url.")
| FetchAndVerifyExpandedFile |
python | sanic-org__sanic | sanic/cli/inspector.py | {
"start": 884,
"end": 3266
} | class ____(ArgumentParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_add_shared(self)
if not self.description:
self.description = ""
self.description = get_logo(True) + self.description
def make_inspector_parser(parser: ArgumentParser) -> None:
_add_shared(parser)
subparsers = parser.add_subparsers(
action=SanicSubParsersAction,
dest="action",
description=(
"Run one or none of the below subcommands. Using inspect without "
"a subcommand will fetch general information about the state "
"of the application instance.\n\n"
"Or, you can optionally follow inspect with a subcommand. "
"If you have created a custom "
"Inspector instance, then you can run custom commands. See "
"https://sanic.dev/en/guide/deployment/inspector.html "
"for more details."
),
title=" Subcommands",
parser_class=InspectorSubParser,
)
reloader = subparsers.add_parser(
"reload",
help="Trigger a reload of the server workers",
formatter_class=SanicHelpFormatter,
)
reloader.add_argument(
"--zero-downtime",
action="store_true",
help=(
"Whether to wait for the new process to be online before "
"terminating the old"
),
)
subparsers.add_parser(
"shutdown",
help="Shutdown the application and all processes",
formatter_class=SanicHelpFormatter,
)
scale = subparsers.add_parser(
"scale",
help="Scale the number of workers",
formatter_class=SanicHelpFormatter,
)
scale.add_argument(
"replicas",
type=int,
help="Number of workers requested",
)
custom = subparsers.add_parser(
"<custom>",
help="Run a custom command",
description=(
"keyword arguments:\n When running a custom command, you can "
"add keyword arguments by appending them to your command\n\n"
"\tsanic inspect foo --one=1 --two=2"
),
formatter_class=SanicHelpFormatter,
)
custom.add_argument(
"positional",
nargs="*",
help="Add one or more non-keyword args to your custom command",
)
| InspectorSubParser |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/contexts/python_registry_publish.py | {
"start": 517,
"end": 5038
} | class ____(PipelineContext):
def __init__(
self,
python_registry_token: Secret,
registry_check_url: str,
package_path: str,
report_output_prefix: str,
is_local: bool,
git_branch: str,
git_revision: str,
diffed_branch: str,
git_repo_url: str,
ci_report_bucket: Optional[str] = None,
registry: str = DEFAULT_PYTHON_PACKAGE_REGISTRY_URL,
gha_workflow_run_url: Optional[str] = None,
dagger_logs_url: Optional[str] = None,
pipeline_start_timestamp: Optional[int] = None,
ci_context: Optional[str] = None,
ci_gcp_credentials: Optional[Secret] = None,
package_name: Optional[str] = None,
version: Optional[str] = None,
) -> None:
self.python_registry_token = python_registry_token
self.registry = registry
self.registry_check_url = registry_check_url
self.package_path = package_path
self.package_metadata = PythonPackageMetadata(package_name, version)
pipeline_name = f"Publish PyPI {package_path}"
super().__init__(
pipeline_name=pipeline_name,
report_output_prefix=report_output_prefix,
ci_report_bucket=ci_report_bucket,
is_local=is_local,
git_branch=git_branch,
git_revision=git_revision,
diffed_branch=diffed_branch,
git_repo_url=git_repo_url,
gha_workflow_run_url=gha_workflow_run_url,
dagger_logs_url=dagger_logs_url,
pipeline_start_timestamp=pipeline_start_timestamp,
ci_context=ci_context,
ci_gcp_credentials=ci_gcp_credentials,
)
@classmethod
async def from_publish_connector_context(
cls: Type["PythonRegistryPublishContext"], connector_context: PublishConnectorContext
) -> Optional["PythonRegistryPublishContext"]:
"""
Create a PythonRegistryPublishContext from a ConnectorContext.
The metadata of the connector is read from the current workdir to capture changes that are not yet published.
If pypi is not enabled, this will return None.
"""
current_metadata = connector_context.connector.metadata
connector_context.logger.info(f"Current metadata: {str(current_metadata)}")
if (
"remoteRegistries" not in current_metadata
or "pypi" not in current_metadata["remoteRegistries"]
or not current_metadata["remoteRegistries"]["pypi"]["enabled"]
):
return None
version = current_metadata["dockerImageTag"]
if connector_context.pre_release:
# use current date as pre-release version
# we can't use the git revision because not all python registries allow local version identifiers. Public version identifiers must conform to PEP 440 and only allow digits.
release_candidate_tag = datetime.now().strftime("%Y%m%d%H%M")
version = f"{version}.dev{release_candidate_tag}"
assert connector_context.python_registry_token is not None, "The connector context must have python_registry_token Secret attribute"
pypi_context = cls(
python_registry_token=connector_context.python_registry_token,
registry=str(connector_context.python_registry_url),
registry_check_url=str(connector_context.python_registry_check_url),
package_path=str(connector_context.connector.code_directory),
package_name=current_metadata["remoteRegistries"]["pypi"]["packageName"],
version=version,
ci_report_bucket=connector_context.ci_report_bucket,
report_output_prefix=connector_context.report_output_prefix,
is_local=connector_context.is_local,
git_branch=connector_context.git_branch,
git_revision=connector_context.git_revision,
diffed_branch=connector_context.diffed_branch,
git_repo_url=connector_context.git_repo_url,
gha_workflow_run_url=connector_context.gha_workflow_run_url,
dagger_logs_url=connector_context.dagger_logs_url,
pipeline_start_timestamp=connector_context.pipeline_start_timestamp,
ci_context=connector_context.ci_context,
ci_gcp_credentials=connector_context.ci_gcp_credentials,
)
pypi_context.dagger_client = connector_context.dagger_client
return pypi_context
| PythonRegistryPublishContext |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_alloy_db.py | {
"start": 31254,
"end": 45403
} | class ____:
def setup_method(self):
self.operator = AlloyDBCreateInstanceOperator(
task_id=TEST_TASK_ID,
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance_configuration=TEST_INSTANCE,
is_secondary=TEST_IS_SECONDARY,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
gcp_conn_id=TEST_GCP_CONN_ID,
request_id=TEST_REQUEST_ID,
validate_request=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_init(self):
assert self.operator.instance_id == TEST_INSTANCE_ID
assert self.operator.cluster_id == TEST_CLUSTER_ID
assert self.operator.instance_configuration == TEST_INSTANCE
assert self.operator.is_secondary == TEST_IS_SECONDARY
def test_template_fields(self):
expected_template_fields = {
"cluster_id",
"instance_id",
"is_secondary",
"instance_configuration",
} | set(AlloyDBWriteBaseOperator.template_fields)
assert set(AlloyDBCreateInstanceOperator.template_fields) == expected_template_fields
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_instance_not_found(self, mock_hook, mock_log):
mock_get_instance = mock_hook.return_value.get_instance
mock_get_instance.side_effect = NotFound("Not found")
result = self.operator._get_instance()
mock_get_instance.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
instance_id=TEST_INSTANCE_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_has_calls(
[
call("Checking if the instance %s exists already...", TEST_INSTANCE_ID),
call("The instance %s does not exist yet.", TEST_INSTANCE_ID),
]
)
assert result is None
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_instance_exception(self, mock_hook, mock_log):
mock_get_instance = mock_hook.return_value.get_instance
mock_get_instance.side_effect = Exception("Test exception")
with pytest.raises(AirflowException):
self.operator._get_instance()
mock_get_instance.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
instance_id=TEST_INSTANCE_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_called_once_with(
"Checking if the instance %s exists already...", TEST_INSTANCE_ID
)
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_get_instance(self, mock_hook, mock_log, mock_to_dict):
mock_get_instance = mock_hook.return_value.get_instance
mock_instance = mock_get_instance.return_value
expected_result = mock_to_dict.return_value
result = self.operator._get_instance()
mock_get_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
)
mock_log.info.assert_has_calls(
[
call("Checking if the instance %s exists already...", TEST_INSTANCE_ID),
call(
"AlloyDB instance %s already exists in the cluster %s.", TEST_CLUSTER_ID, TEST_INSTANCE_ID
),
]
)
mock_to_dict.assert_called_once_with(mock_instance)
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
mock_to_dict,
):
mock_get_instance.return_value = None
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_operation = mock_create_instance.return_value
mock_operation_result = mock_get_operation_result.return_value
expected_result = mock_to_dict.return_value
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Creating an AlloyDB instance.")
mock_get_instance.assert_called_once()
mock_create_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_create_secondary_instance.called
mock_to_dict.assert_called_once_with(mock_operation_result)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_is_secondary(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
mock_to_dict,
):
mock_get_instance.return_value = None
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_operation = mock_create_secondary_instance.return_value
mock_operation_result = mock_get_operation_result.return_value
expected_result = mock_to_dict.return_value
mock_context = mock.MagicMock()
self.operator.is_secondary = True
result = self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Creating an AlloyDB instance.")
mock_get_instance.assert_called_once()
assert not mock_create_instance.called
mock_create_secondary_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_to_dict.assert_called_once_with(mock_operation_result)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
mock_to_dict,
):
mock_get_instance.return_value = None
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_operation = mock_create_instance.return_value
mock_get_operation_result.return_value = None
mock_context = mock.MagicMock()
self.operator.validate_request = True
result = self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Validating a Create AlloyDB instance request.")
mock_get_instance.assert_called_once()
mock_create_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_create_secondary_instance.called
assert not mock_to_dict.called
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request_is_secondary(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
mock_to_dict,
):
mock_get_instance.return_value = None
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_operation = mock_create_secondary_instance.return_value
mock_get_operation_result.return_value = None
mock_context = mock.MagicMock()
self.operator.validate_request = True
self.operator.is_secondary = True
result = self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Validating a Create AlloyDB instance request.")
mock_get_instance.assert_called_once()
mock_create_secondary_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_create_instance.called
assert not mock_to_dict.called
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_already_exists(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
):
expected_result = mock_get_instance.return_value
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
assert not mock_log.info.called
mock_get_instance.assert_called_once()
assert not mock_create_instance.called
assert not mock_create_secondary_instance.called
assert not mock_get_operation_result.called
assert result == expected_result
@mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("_get_instance"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(CREATE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_exception(
self,
mock_hook,
mock_log,
mock_get_operation_result,
mock_get_instance,
mock_to_dict,
):
mock_get_instance.return_value = None
mock_create_instance = mock_hook.return_value.create_instance
mock_create_secondary_instance = mock_hook.return_value.create_secondary_instance
mock_create_instance.side_effect = Exception()
mock_context = mock.MagicMock()
with pytest.raises(AirflowException):
self.operator.execute(context=mock_context)
mock_log.info.assert_called_once_with("Creating an AlloyDB instance.")
mock_get_instance.assert_called_once()
mock_create_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
instance=TEST_INSTANCE,
location=TEST_GCP_REGION,
project_id=TEST_GCP_PROJECT,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_create_secondary_instance.called
assert not mock_to_dict.called
assert not mock_get_operation_result.called
| TestAlloyDBCreateInstanceOperator |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/glib2.py | {
"start": 10021,
"end": 10929
} | class ____(Task.Task):
color = 'BLUE'
base_cmd = '${GLIB_COMPILE_RESOURCES} --sourcedir=${SRC[0].parent.srcpath()} --sourcedir=${SRC[0].bld_dir()}'
def scan(self):
bld = self.generator.bld
kw = {}
kw['cwd'] = self.get_cwd()
kw['quiet'] = Context.BOTH
cmd = Utils.subst_vars(
'${GLIB_COMPILE_RESOURCES} --sourcedir=%s --sourcedir=%s --generate-dependencies %s' %
(self.inputs[0].parent.srcpath(), self.inputs[0].bld_dir(), self.inputs[0].bldpath()), self.env
)
output = bld.cmd_and_log(cmd, **kw)
nodes = []
names = []
for dep in output.splitlines():
if dep:
node = bld.bldnode.find_node(dep)
if node:
nodes.append(node)
else:
names.append(dep)
return (nodes, names)
| glib_gresource_base |
python | getsentry__sentry | tests/sentry/runner/commands/test_killswitches.py | {
"start": 263,
"end": 5410
} | class ____(CliTestCase):
command = killswitches
@mock.patch(
"sentry.killswitches.ALL_KILLSWITCH_OPTIONS",
{
OPTION: KillswitchInfo(
description="the description", fields={"project_id": "hey", "event_type": "ho"}
)
},
)
def test_basic(self) -> None:
assert self.invoke("list").output == (
"\n"
"store.load-shed-group-creation-projects\n"
" # the description\n"
"<disabled entirely>\n"
)
PREAMBLE = (
"# store.load-shed-group-creation-projects: the description\n"
"# \n"
"# After saving and exiting, your killswitch conditions will be printed\n"
"# in faux-SQL for you to confirm.\n"
"# \n"
"# Below a template is given for a single condition. The condition's\n"
"# fields will be joined with AND, while all conditions will be joined\n"
"# with OR. All fields need to be set, but can be set to null/~, which is\n"
"# a wildcard.\n"
"# \n"
"# - # ho\n"
"# event_type: null\n"
"# # hey\n"
"# project_id: null"
)
assert self.invoke("pull", OPTION, "-").output == PREAMBLE
rv = self.invoke(
"push", "--yes", OPTION, "-", input=("- project_id: 42\n event_type: transaction\n")
)
assert rv.exit_code == 0
assert self.invoke("list").output == (
"\n"
"store.load-shed-group-creation-projects\n"
" # the description\n"
"DROP DATA WHERE\n"
" (project_id = 42 AND event_type = transaction)\n"
)
assert self.invoke("pull", OPTION, "-").output == PREAMBLE + (
"\n" "\n" "- event_type: transaction\n" " project_id: '42'\n"
)
rv = self.invoke(
"push",
"--yes",
OPTION,
"-",
input=(
"- project_id: 42\n"
" event_type: transaction\n"
"- project_id: 43\n"
" event_type: ~\n"
),
)
assert rv.exit_code == 0
assert self.invoke("list").output == (
"\n"
"store.load-shed-group-creation-projects\n"
" # the description\n"
"DROP DATA WHERE\n"
" (project_id = 42 AND event_type = transaction) OR\n"
" (project_id = 43 AND event_type = *)\n"
)
assert self.invoke("pull", OPTION, "-").output == PREAMBLE + (
"\n"
"\n"
"- event_type: transaction\n"
" project_id: '42'\n"
"- event_type: null\n"
" project_id: '43'\n"
)
rv = self.invoke(
"push",
"--yes",
OPTION,
"-",
input="\n",
)
assert rv.exit_code == 0
assert self.invoke("list").output == (
"\n"
"store.load-shed-group-creation-projects\n"
" # the description\n"
"<disabled entirely>\n"
)
assert self.invoke("pull", OPTION, "-").output == PREAMBLE
@mock.patch(
"sentry.tasks.relay.schedule_invalidate_project_config",
)
@mock.patch(
"sentry.options.set",
)
def test_relay_drop_transaction_metrics(
self, mock_set: mock.MagicMock, mock_schedule: mock.MagicMock
) -> None:
option = "relay.drop-transaction-metrics"
rv = self.invoke("push", "--yes", option, "-", input=("- project_id: 42\n"))
assert rv.exit_code == 0, rv.output
assert mock_set.mock_calls == [
mock.call(
"relay.drop-transaction-metrics",
[{"project_id": "42"}],
channel=options.UpdateChannel.KILLSWITCH,
)
]
assert mock_schedule.mock_calls == [
mock.call(project_id="42", trigger="killswitches.relay.drop-transaction-metrics")
]
@mock.patch(
"sentry.tasks.relay.schedule_invalidate_project_config",
)
@mock.patch(
"sentry.options.set",
)
def test_relay_drop_transaction_metrics_all(
self, mock_set: mock.MagicMock, mock_schedule: mock.MagicMock
) -> None:
self.organization
option = "relay.drop-transaction-metrics"
rv = self.invoke("push", "--yes", option, "-", input=("- project_id: null\n"))
assert rv.exit_code == 0, rv.output
assert mock_set.mock_calls == [
mock.call(
"relay.drop-transaction-metrics",
[{"project_id": None}],
channel=options.UpdateChannel.KILLSWITCH,
)
]
# All organizations should have been invalidated:
assert mock_schedule.mock_calls == [
mock.call(
trigger="invalidate-all",
organization_id=mock_schedule.mock_calls[0].kwargs["organization_id"],
countdown=0,
)
]
| KillswitchesTest |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 98239,
"end": 102851
} | class ____(fixtures.TestBase):
def setup_test(self):
metadata = MetaData()
self.test_table = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("test_column", JSON),
)
self.jsoncol = self.test_table.c.test_column
self.dialect = default.DefaultDialect()
self.dialect._json_serializer = None
self.dialect._json_deserializer = None
def test_bind_serialize_default(self):
proc = self.test_table.c.test_column.type._cached_bind_processor(
self.dialect
)
eq_(
proc({"A": [1, 2, 3, True, False]}),
'{"A": [1, 2, 3, true, false]}',
)
def test_bind_serialize_None(self):
proc = self.test_table.c.test_column.type._cached_bind_processor(
self.dialect
)
eq_(proc(None), "null")
def test_bind_serialize_none_as_null(self):
proc = JSON(none_as_null=True)._cached_bind_processor(self.dialect)
eq_(proc(None), None)
eq_(proc(null()), None)
def test_bind_serialize_null(self):
proc = self.test_table.c.test_column.type._cached_bind_processor(
self.dialect
)
eq_(proc(null()), None)
def test_result_deserialize_default(self):
proc = self.test_table.c.test_column.type._cached_result_processor(
self.dialect, None
)
eq_(
proc('{"A": [1, 2, 3, true, false]}'),
{"A": [1, 2, 3, True, False]},
)
def test_result_deserialize_null(self):
proc = self.test_table.c.test_column.type._cached_result_processor(
self.dialect, None
)
eq_(proc("null"), None)
def test_result_deserialize_None(self):
proc = self.test_table.c.test_column.type._cached_result_processor(
self.dialect, None
)
eq_(proc(None), None)
def _dialect_index_fixture(self, int_processor, str_processor):
class MyInt(Integer):
def bind_processor(self, dialect):
return lambda value: value + 10
def literal_processor(self, dialect):
return lambda value: str(value + 15)
class MyString(String):
def bind_processor(self, dialect):
return lambda value: value + "10"
def literal_processor(self, dialect):
return lambda value: value + "15"
class MyDialect(default.DefaultDialect):
colspecs = {}
if int_processor:
colspecs[Integer] = MyInt
if str_processor:
colspecs[String] = MyString
return MyDialect()
def test_index_bind_proc_int(self):
expr = self.test_table.c.test_column[5]
int_dialect = self._dialect_index_fixture(True, True)
non_int_dialect = self._dialect_index_fixture(False, True)
bindproc = expr.right.type._cached_bind_processor(int_dialect)
eq_(bindproc(expr.right.value), 15)
bindproc = expr.right.type._cached_bind_processor(non_int_dialect)
eq_(bindproc(expr.right.value), 5)
def test_index_literal_proc_int(self):
expr = self.test_table.c.test_column[5]
int_dialect = self._dialect_index_fixture(True, True)
non_int_dialect = self._dialect_index_fixture(False, True)
bindproc = expr.right.type._cached_literal_processor(int_dialect)
eq_(bindproc(expr.right.value), "20")
bindproc = expr.right.type._cached_literal_processor(non_int_dialect)
eq_(bindproc(expr.right.value), "5")
def test_index_bind_proc_str(self):
expr = self.test_table.c.test_column["five"]
str_dialect = self._dialect_index_fixture(True, True)
non_str_dialect = self._dialect_index_fixture(False, False)
bindproc = expr.right.type._cached_bind_processor(str_dialect)
eq_(bindproc(expr.right.value), "five10")
bindproc = expr.right.type._cached_bind_processor(non_str_dialect)
eq_(bindproc(expr.right.value), "five")
def test_index_literal_proc_str(self):
expr = self.test_table.c.test_column["five"]
str_dialect = self._dialect_index_fixture(True, True)
non_str_dialect = self._dialect_index_fixture(False, False)
bindproc = expr.right.type._cached_literal_processor(str_dialect)
eq_(bindproc(expr.right.value), "five15")
bindproc = expr.right.type._cached_literal_processor(non_str_dialect)
eq_(bindproc(expr.right.value), "'five'")
| JSONTest |
python | huggingface__transformers | src/transformers/models/glm4v/configuration_glm4v.py | {
"start": 5545,
"end": 11972
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of
GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151552):
Vocabulary size of the Glm4v model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Glm4vModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 13696):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
```python
>>> from transformers import Glm4vTextModel, Glm4vConfig
>>> # Initializing a GLM-4.1V style configuration
>>> configuration = Glm4vConfig()
>>> # Initializing a model from the GLM-4.1V style configuration
>>> model = Glm4vTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glm4v_text"
base_config_key = "text_config"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `Glm4v`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_up_proj": "colwise_rep", # we need to replicate here due to the `chunk` operation
"layers.*.mlp.down_proj": "rowwise_rep", # we need to replicate here due to the `chunk` operation
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 151552,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 13696,
num_hidden_layers: Optional[int] = 40,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 2,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 32768,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-05,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.rope_parameters = rope_parameters
super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
| Glm4vTextConfig |
python | getsentry__sentry | src/sentry/integrations/repository/metric_alert.py | {
"start": 1803,
"end": 2808
} | class ____(BaseNewNotificationMessage):
incident_id: int | None = None
trigger_action_id: int | None = None
def get_validation_error(self) -> Exception | None:
error = super().get_validation_error()
if error is not None:
return error
if self.message_identifier is not None:
# If a message_identifier exists, that means a successful notification happened for an incident and trigger
# This means that neither of them can be empty
if self.incident_id is None or self.trigger_action_id is None:
return IncidentAndTriggerActionValidationError()
# We can create a NotificationMessage if it has both, or neither, of incident and trigger.
# The following is an XNOR check for incident and trigger
if (self.incident_id is not None) != (self.trigger_action_id is not None):
return IncidentAndTriggerActionValidationError()
return None
| NewMetricAlertNotificationMessage |
python | numba__numba | numba/tests/test_datamodel.py | {
"start": 4548,
"end": 5260
} | class ____(unittest.TestCase):
def setUp(self):
self.dmm = datamodel.default_manager
def test_number(self):
ty = types.int32
dm = self.dmm[ty]
self.assertFalse(dm.contains_nrt_meminfo())
def test_array(self):
ty = types.int32[:]
dm = self.dmm[ty]
self.assertTrue(dm.contains_nrt_meminfo())
def test_tuple_of_number(self):
ty = types.UniTuple(dtype=types.int32, count=2)
dm = self.dmm[ty]
self.assertFalse(dm.contains_nrt_meminfo())
def test_tuple_of_array(self):
ty = types.UniTuple(dtype=types.int32[:], count=2)
dm = self.dmm[ty]
self.assertTrue(dm.contains_nrt_meminfo())
| TestMemInfo |
python | coleifer__peewee | tests/regressions.py | {
"start": 11411,
"end": 13561
} | class ____(ModelTestCase):
requires = [User, Tweet]
def test_returning_integration_subqueries(self):
_create_users_tweets(self.database)
# We can use a correlated subquery in the RETURNING clause.
subq = (Tweet
.select(fn.COUNT(Tweet.id).alias('ct'))
.where(Tweet.user == User.id))
query = (User
.update(username=(User.username + '-x'))
.returning(subq.alias('ct'), User.username))
result = query.execute()
self.assertEqual(sorted([(r.ct, r.username) for r in result]), [
(0, 'zaizee-x'), (2, 'mickey-x'), (3, 'huey-x')])
# We can use a correlated subquery via UPDATE...FROM, and reference the
# FROM table in both the update and the RETURNING clause.
subq = (User
.select(User.id, fn.COUNT(Tweet.id).alias('ct'))
.join(Tweet, JOIN.LEFT_OUTER)
.group_by(User.id))
query = (User
.update(username=User.username + subq.c.ct)
.from_(subq)
.where(User.id == subq.c.id)
.returning(subq.c.ct, User.username))
result = query.execute()
self.assertEqual(sorted([(r.ct, r.username) for r in result]), [
(0, 'zaizee-x0'), (2, 'mickey-x2'), (3, 'huey-x3')])
def test_returning_integration(self):
query = (User
.insert_many([('huey',), ('mickey',), ('zaizee',)],
fields=[User.username])
.returning(User.id, User.username)
.objects())
result = query.execute()
self.assertEqual([(r.id, r.username) for r in result], [
(1, 'huey'), (2, 'mickey'), (3, 'zaizee')])
query = (User
.delete()
.where(~User.username.startswith('h'))
.returning(User.id, User.username)
.objects())
result = query.execute()
self.assertEqual(sorted([(r.id, r.username) for r in result]), [
(2, 'mickey'), (3, 'zaizee')])
| TestReturningIntegrationRegressions |
python | OmkarPathak__pygorithm | tests/test_sorting.py | {
"start": 2650,
"end": 2839
} | class ____(unittest.TestCase, TestSortingAlgorithm):
inplace = True
alph_support = True
@staticmethod
def sort(arr):
return selection_sort.sort(arr)
| TestSelectionSort |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/loader.py | {
"start": 1850,
"end": 2412
} | class ____(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
def __init__(self, stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
self.comment_handling = None
Reader.__init__(self, stream, loader=self)
Scanner.__init__(self, loader=self)
Parser.__init__(self, loader=self)
Composer.__init__(self, loader=self)
Constructor.__init__(self, loader=self)
VersionedResolver.__init__(self, version, loader=self)
| Loader |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function.py | {
"start": 43541,
"end": 45210
} | class ____(object):
"""Holds the state of a function call between execution and recording."""
__slots__ = [
"_functions", "_inference_args", "_input_tangents", "_tape_watching"
]
def __init__(self, functions, inference_args, input_tangents, tape_watching):
"""Collects information about the function call.
Args:
functions: An object which produces forward and backward functions, either
a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
tape_watching: Boolean, with True indicating that recording is necessary.
"""
self._functions = functions
self._inference_args = inference_args
self._input_tangents = input_tangents
self._tape_watching = tape_watching
def forward(self):
"""Builds or retrieves a forward function for this call."""
forward_function = self._functions.forward(
self._inference_args, self._input_tangents
)
return forward_function, self._inference_args + self._input_tangents
def record(self, flat_outputs):
"""Given outputs from the execution of `forward`, records the operation."""
if (
self._tape_watching
and not isinstance(flat_outputs, ops.Operation)
and flat_outputs is not None
):
# We only record function calls which have outputs, and then only when a
# tape is watching.
self._functions.record(
flat_outputs, self._inference_args, self._input_tangents
)
| _ForwardBackwardCall |
python | ethereum__web3.py | web3/types.py | {
"start": 15129,
"end": 15319
} | class ____(TypedDict, total=False):
address: (
Address | ChecksumAddress | ENS | Sequence[Address | ChecksumAddress | ENS]
)
topics: Sequence[TopicFilter]
| LogsSubscriptionArg |
python | huggingface__transformers | src/transformers/models/pvt/modeling_pvt.py | {
"start": 5985,
"end": 6426
} | class ____(nn.Module):
def __init__(self, config: PvtConfig, hidden_size: int):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| PvtSelfOutput |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-genesys/source_genesys/source.py | {
"start": 2125,
"end": 2407
} | class ____(GenesysStream):
"""
API Docs: https://developer.genesys.cloud/routing/routing/
"""
page_size = 200
primary_key = "id"
cursor_field = "dateModified"
def path(self, **kwargs) -> str:
return "routing/assessments"
| RoutingRoutingAssessments |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/common.py | {
"start": 1578,
"end": 1724
} | class ____(enum.Enum):
"""Bulk Action to be taken if the entity does not exist."""
FAIL = "fail"
SKIP = "skip"
| BulkActionNotOnExistence |
python | rq__rq | tests/fixtures.py | {
"start": 3185,
"end": 3257
} | class ____:
def __repr__(self):
return 'é'
| UnicodeStringObject |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 40179,
"end": 40519
} | class ____(BaseModel):
type: Literal["AddFields"]
fields: List[AddedFieldDefinition] = Field(
...,
description="List of transformations (path and corresponding value) that will be added to the record.",
title="Fields",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| AddFields |
python | django__django | tests/prefetch_related/models.py | {
"start": 2769,
"end": 2947
} | class ____(models.QuerySet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._iterable_class = ModelIterableSubclass
| TeacherQuerySet |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-file/source_file/client.py | {
"start": 1268,
"end": 9890
} | class ____:
"""Class to manage read from file located at different providers
Supported examples of URL this class can accept are as follows:
```
s3://my_bucket/my_key
s3://my_key:my_secret@my_bucket/my_key
gs://my_bucket/my_blob
hdfs:///path/file (not tested)
hdfs://path/file (not tested)
webhdfs://host:port/path/file (not tested)
./local/path/file
~/local/path/file
local/path/file
./local/path/file.gz
file:///home/user/file
file:///home/user/file.bz2
[ssh|scp|sftp]://username@host//path/file
[ssh|scp|sftp]://username@host/path/file
[ssh|scp|sftp]://username:password@host/path/file
```
"""
def __init__(self, url: str, provider: dict, binary=None, encoding=None):
self._url = url
self._provider = provider
self._file = None
self.args = {
"mode": "rb" if binary else "r",
"encoding": None if binary else encoding,
}
def __enter__(self):
return self._file
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def full_url(self):
return f"{self.storage_scheme}{self.url}"
def close(self):
if self._file:
self._file.close()
self._file = None
def backoff_giveup(self, error):
# https://github.com/airbytehq/oncall/issues/1954
if isinstance(error, SSHException) and str(error).startswith("Error reading SSH protocol banner"):
# We need to clear smart_open internal _SSH cache from the previous attempt, otherwise:
# SSHException('SSH session not active')
# will be raised
smart_open.ssh._SSH.clear()
return False
return True
def open(self):
self.close()
_open = backoff.on_exception(backoff.expo, Exception, max_tries=5, giveup=self.backoff_giveup)(self._open)
try:
self._file = _open()
except google.api_core.exceptions.NotFound as err:
raise FileNotFoundError(self.url) from err
return self
def _open(self):
storage = self.storage_scheme
url = self.url
if storage == "gs://":
return self._open_gcs_url()
elif storage == "s3://":
return self._open_aws_url()
elif storage == "azure://":
return self._open_azblob_url()
elif storage == "webhdfs://":
host = self._provider["host"]
port = self._provider["port"]
return smart_open.open(f"webhdfs://{host}:{port}/{url}", **self.args)
elif storage in ("ssh://", "scp://", "sftp://"):
# We need to quote parameters to deal with special characters
# https://bugs.python.org/issue18140
user = urllib.parse.quote(self._provider["user"])
host = urllib.parse.quote(self._provider["host"])
url = urllib.parse.quote(url)
# TODO: Remove int casting when https://github.com/airbytehq/airbyte/issues/4952 is addressed
# TODO: The "port" field in spec.json must also be changed
_port_value = self._provider.get("port", 22)
try:
port = int(_port_value)
except ValueError as err:
raise ValueError(f"{_port_value} is not a valid integer for the port") from err
# Explicitly turn off ssh keys stored in ~/.ssh
transport_params = {"connect_kwargs": {"look_for_keys": False}, "timeout": SSH_TIMEOUT}
if "password" in self._provider:
password = urllib.parse.quote(self._provider["password"])
uri = f"{storage}{user}:{password}@{host}:{port}/{url}"
else:
uri = f"{storage}{user}@{host}:{port}/{url}"
return smart_open.open(uri, transport_params=transport_params, **self.args)
elif storage in ("https://", "http://"):
transport_params = None
if "user_agent" in self._provider and self._provider["user_agent"]:
airbyte_version = environ.get("AIRBYTE_VERSION", "0.0")
transport_params = {"headers": {"Accept-Encoding": "identity", "User-Agent": f"Airbyte/{airbyte_version}"}}
logger.info(f"TransportParams: {transport_params}")
return smart_open.open(self.full_url, transport_params=transport_params, **self.args)
return smart_open.open(self.full_url, **self.args)
@property
def url(self) -> str:
"""Convert URL to remove the URL prefix (scheme)
:return: the corresponding URL without URL prefix / scheme
"""
parse_result = urlparse(self._url)
if parse_result.scheme:
return self._url.split("://")[-1]
else:
return self._url
@property
def storage_scheme(self) -> str:
"""Convert Storage Names to the proper URL Prefix
:return: the corresponding URL prefix / scheme
"""
storage_name = self._provider["storage"].upper()
parse_result = urlparse(self._url)
if storage_name == "GCS":
return "gs://"
elif storage_name == "S3":
return "s3://"
elif storage_name == "AZBLOB":
return "azure://"
elif storage_name == "HTTPS":
return "https://"
elif storage_name == "SSH" or storage_name == "SCP":
return "scp://"
elif storage_name == "SFTP":
return "sftp://"
elif storage_name == "WEBHDFS":
return "webhdfs://"
elif storage_name == "LOCAL":
return "file://"
elif parse_result.scheme:
return parse_result.scheme
logger.error(f"Unknown Storage provider in: {self._url}")
return ""
def _open_gcs_url(self) -> object:
service_account_json = self._provider.get("service_account_json")
credentials = None
if service_account_json:
try:
credentials = json.loads(self._provider["service_account_json"])
except json.decoder.JSONDecodeError as err:
error_msg = f"Failed to parse gcs service account json: {repr(err)}"
logger.error(f"{error_msg}\n{traceback.format_exc()}")
raise AirbyteTracedException(message=error_msg, internal_message=error_msg, failure_type=FailureType.config_error) from err
if credentials:
credentials = service_account.Credentials.from_service_account_info(credentials)
client = GCSClient(credentials=credentials, project=credentials._project_id)
else:
client = GCSClient.create_anonymous_client()
file_to_close = smart_open.open(self.full_url, transport_params={"client": client}, **self.args)
return file_to_close
def _open_aws_url(self):
aws_access_key_id = self._provider.get("aws_access_key_id")
aws_secret_access_key = self._provider.get("aws_secret_access_key")
use_aws_account = aws_access_key_id and aws_secret_access_key
if use_aws_account:
aws_access_key_id = self._provider.get("aws_access_key_id", "")
aws_secret_access_key = self._provider.get("aws_secret_access_key", "")
url = f"{self.storage_scheme}{aws_access_key_id}:{aws_secret_access_key}@{self.url}"
result = smart_open.open(url, **self.args)
else:
config = botocore.client.Config(signature_version=botocore.UNSIGNED)
params = {"client": boto3.client("s3", config=config)}
result = smart_open.open(self.full_url, transport_params=params, **self.args)
return result
def _open_azblob_url(self):
storage_account = self._provider.get("storage_account")
storage_acc_url = f"https://{storage_account}.blob.core.windows.net"
sas_token = self._provider.get("sas_token", None)
shared_key = self._provider.get("shared_key", None)
# if both keys are provided, shared_key is preferred as it has permissions on the entire storage account
credential = shared_key or sas_token
if credential:
client = BlobServiceClient(account_url=storage_acc_url, credential=credential)
else:
# assuming anonymous public read access given no credential
client = BlobServiceClient(account_url=storage_acc_url)
url = f"{self.storage_scheme}{self.url}"
return smart_open.open(url, transport_params=dict(client=client), **self.args)
| URLFile |
python | facebook__pyre-check | tools/incremental_test/tests/test_environment.py | {
"start": 457,
"end": 627
} | class ____:
working_directory: Path
command: str
stdin: Final[Optional[str]] = None
MockExecuteCallable = Callable[[CommandInput], CommandOutput]
| CommandInput |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 14794,
"end": 15345
} | class ____(PreTrainedModel):
config: ApertusConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["ApertusDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ApertusDecoderLayer,
"attentions": ApertusAttention,
}
@auto_docstring
| ApertusPreTrainedModel |
python | lepture__authlib | authlib/integrations/django_client/apps.py | {
"start": 321,
"end": 1170
} | class ____:
def save_authorize_data(self, request, **kwargs):
state = kwargs.pop("state", None)
if state:
self.framework.set_state_data(request.session, state, kwargs)
else:
raise RuntimeError("Missing state value")
def authorize_redirect(self, request, redirect_uri=None, **kwargs):
"""Create a HTTP Redirect for Authorization Endpoint.
:param request: HTTP request instance from Django view.
:param redirect_uri: Callback or redirect URI for authorization.
:param kwargs: Extra parameters to include.
:return: A HTTP redirect response.
"""
rv = self.create_authorization_url(redirect_uri, **kwargs)
self.save_authorize_data(request, redirect_uri=redirect_uri, **rv)
return HttpResponseRedirect(rv["url"])
| DjangoAppMixin |