language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/diamond_link_top/package.py | {
"start": 217,
"end": 573
} | class ____(Package):
"""Part of diamond-link-{top,left,right,bottom} group"""
homepage = "http://www.example.com"
url = "http://www.example.com/diamond-link-top-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("diamond-link-left", type="link")
depends_on("diamond-link-right", type="link")
| DiamondLinkTop |
python | huggingface__transformers | src/transformers/models/whisper/modeling_whisper.py | {
"start": 40000,
"end": 48746
} | class ____(WhisperPreTrainedModel):
def __init__(self, config: WhisperConfig):
super().__init__(config)
self.encoder = WhisperEncoder(config)
self.decoder = WhisperDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def freeze_encoder(self):
"""
Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
not be updated during training.
"""
self.encoder._freeze_parameters()
def _mask_input_features(
self,
input_features: torch.FloatTensor,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return input_features
# generate indices & apply SpecAugment along time axis
batch_size, hidden_size, sequence_length = input_features.size()
if self.config.mask_time_prob > 0 and self.training:
# generate indices & apply SpecAugment along time axis
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool)
mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1)
input_features[mask_time_indices] = 0
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool)
input_features[mask_feature_indices] = 0
return input_features
@auto_docstring
def forward(
self,
input_features: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None,
decoder_position_ids: Optional[tuple[torch.LongTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple[torch.Tensor], Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read
[`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, WhisperModel
>>> from datasets import load_dataset
>>> model = WhisperModel.from_pretrained("openai/whisper-base")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_features = inputs.input_features
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 512]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
input_features = self._mask_input_features(input_features, attention_mask=attention_mask)
encoder_outputs = self.encoder(
input_features,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
position_ids=decoder_position_ids,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The Whisper Model with a language modeling head. Can be used for automatic speech recognition.
"""
)
| WhisperModel |
python | django-import-export__django-import-export | tests/core/tests/test_instance_loaders.py | {
"start": 133,
"end": 376
} | class ____(TestCase):
def test_get_instance(self):
instance_loader = instance_loaders.BaseInstanceLoader(None)
with self.assertRaises(NotImplementedError):
instance_loader.get_instance(None)
| BaseInstanceLoaderTest |
python | wandb__wandb | wandb/sdk/launch/agent/config.py | {
"start": 1132,
"end": 1843
} | class ____(BaseModel):
"""Configuration for registry block.
Note that we don't forbid extra fields here because:
- We want to allow all fields supported by each registry
- We will perform validation on the registry object itself later
- Registry block is being deprecated in favor of destination field in builder
"""
type: Optional[RegistryType] = Field(
None,
description="The type of registry to use.",
)
uri: Optional[str] = Field(
None,
description="The URI of the registry.",
)
@validator("uri") # type: ignore
@classmethod
def validate_uri(cls, uri: str) -> str:
return validate_registry_uri(uri)
| RegistryConfig |
python | mlflow__mlflow | mlflow/openai/api_request_parallel_processor.py | {
"start": 1108,
"end": 4392
} | class ____:
"""Stores metadata about the script's progress. Only one instance is created."""
num_tasks_started: int = 0
num_tasks_in_progress: int = 0 # script ends when this reaches 0
num_tasks_succeeded: int = 0
num_tasks_failed: int = 0
num_rate_limit_errors: int = 0
lock: threading.Lock = threading.Lock()
error = None
def start_task(self):
with self.lock:
self.num_tasks_started += 1
self.num_tasks_in_progress += 1
def complete_task(self, *, success: bool):
with self.lock:
self.num_tasks_in_progress -= 1
if success:
self.num_tasks_succeeded += 1
else:
self.num_tasks_failed += 1
def increment_num_rate_limit_errors(self):
with self.lock:
self.num_rate_limit_errors += 1
def call_api(
index: int,
results: list[tuple[int, Any]],
task: Callable[[], Any],
status_tracker: StatusTracker,
):
import openai
status_tracker.start_task()
try:
result = task()
_logger.debug(f"Request #{index} succeeded")
status_tracker.complete_task(success=True)
results.append((index, result))
except openai.RateLimitError as e:
status_tracker.complete_task(success=False)
_logger.debug(f"Request #{index} failed with: {e}")
status_tracker.increment_num_rate_limit_errors()
status_tracker.error = mlflow.MlflowException(
f"Request #{index} failed with rate limit: {e}."
)
except Exception as e:
status_tracker.complete_task(success=False)
_logger.debug(f"Request #{index} failed with: {e}")
status_tracker.error = mlflow.MlflowException(
f"Request #{index} failed with: {e.__cause__}"
)
def process_api_requests(
request_tasks: list[Callable[[], Any]],
max_workers: int = 10,
):
"""Processes API requests in parallel"""
# initialize trackers
status_tracker = StatusTracker() # single instance to track a collection of variables
results: list[tuple[int, Any]] = []
request_tasks_iter = enumerate(request_tasks)
_logger.debug(f"Request pool executor will run {len(request_tasks)} requests")
with ThreadPoolExecutor(
max_workers=max_workers, thread_name_prefix="MlflowOpenAiApi"
) as executor:
futures = [
executor.submit(
call_api,
index=index,
task=task,
results=results,
status_tracker=status_tracker,
)
for index, task in request_tasks_iter
]
wait(futures, return_when=FIRST_EXCEPTION)
# after finishing, log final status
if status_tracker.num_tasks_failed > 0:
if status_tracker.num_tasks_failed == 1:
raise status_tracker.error
raise mlflow.MlflowException(
f"{status_tracker.num_tasks_failed} tasks failed. See logs for details."
)
if status_tracker.num_rate_limit_errors > 0:
_logger.debug(
f"{status_tracker.num_rate_limit_errors} rate limit errors received. "
"Consider running at a lower rate."
)
return [res for _, res in sorted(results)]
| StatusTracker |
python | psf__black | tests/data/cases/trailing_comma_optional_parens1.py | {
"start": 289,
"end": 581
} | class ____:
def get_help_text(self):
return ngettext(
"Your password must contain at least %(min_length)d character.",
"Your password must contain at least %(min_length)d characters.",
self.min_length,
) % {'min_length': self.min_length}
| X |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_between_inclusive_threshold_range.py | {
"start": 947,
"end": 7913
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = (
"data_profiler.profile_numeric_columns_percent_diff_between_inclusive_threshold_range"
)
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - 22
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_percent_diff = metrics.get("data_profiler.profile_percent_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_percent_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_percent_diff["data_stats"]
requested_columns = {}
unavailable_stats = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
unavailable_stats[col] = {}
for stat, bounds in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "ERR_divide_by_zero" or diff_val == "ERR_no_original_value":
unavailable_stats[col][stat] = diff_val
requested_columns[col][stat] = diff_val
continue
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
between_bounds = is_value_between_bounds(
diff_val, bounds["lower"], bounds["upper"], inclusive=True
)
if not between_bounds:
requested_columns[col][stat] = {
"lower_bound": bounds["lower"],
"upper_bound": bounds["upper"],
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
for column in list(unavailable_stats.keys()):
if unavailable_stats[column] == {}:
unavailable_stats.pop(column, None)
if unavailable_stats != {}:
div_by_zero_stats = []
no_original_value = []
for column, stats in unavailable_stats.items():
current_col = copy.deepcopy(limit_check_report_keys_copy[column])
for stat, val in stats.items():
if val == "ERR_divide_by_zero":
div_by_zero_stats.append(column + ": " + stat)
current_col.pop(stat, None)
elif val == "ERR_no_original_value":
no_original_value.append(column + ": " + stat)
current_col.pop(stat, None)
limit_check_report_keys_copy[column] = current_col
warning = "\nWARNING:\n"
if len(div_by_zero_stats) > 0:
warning += "Div By Zero ERROR:\nValue in profile report was 0 for the following column: stat\n"
for div_by_zero_stat in div_by_zero_stats:
warning += " " + div_by_zero_stat + "\n"
if len(no_original_value) > 0:
warning += "Value not Found ERROR:\nStatistic was not found in profile report for the following column: stat\n"
for no_original_value_string in no_original_value:
warning += " " + no_original_value_string + "\n"
warning += "\nTo avoid these errors, you should use the replace 'limit_check_report_keys' with the following:\n"
warning += r"" + json.dumps(limit_check_report_keys_copy, indent=2)
warning += "\n"
warnings.warn(warning)
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_percent_diff_between_inclusive_threshold_range"
):
dependencies["data_profiler.profile_percent_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_percent_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsPercentDiffBetweenInclusiveThresholdRange |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 2360,
"end": 2840
} | class ____(ContractDataError):
"""
Raised when trying to deploy an interface or empty data.
"""
def __init__(self, contract_type: "ContractType"):
message = "Cannot deploy: contract"
if name := contract_type.name:
message = f"{message} '{name}'"
message = (
f"{message} has no deployment-bytecode. Are you attempting to deploy an interface?"
)
super().__init__(message)
| MissingDeploymentBytecodeError |
python | ray-project__ray | rllib/models/tf/tf_action_dist.py | {
"start": 14745,
"end": 18523
} | class ____(TFActionDistribution):
"""A tanh-squashed Gaussian distribution defined by: mean, std, low, high.
The distribution will never return low or high exactly, but
`low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
"""
def __init__(
self,
inputs: List[TensorType],
model: ModelV2,
low: float = -1.0,
high: float = 1.0,
):
"""Parameterizes the distribution via `inputs`.
Args:
low: The lowest possible sampling value
(excluding this value).
high: The highest possible sampling value
(excluding this value).
"""
assert tfp is not None
mean, log_std = tf.split(inputs, 2, axis=-1)
# Clip `scale` values (coming from NN) to reasonable values.
log_std = tf.clip_by_value(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)
std = tf.exp(log_std)
self.distr = tfp.distributions.Normal(loc=mean, scale=std)
assert np.all(np.less(low, high))
self.low = low
self.high = high
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
mean = self.distr.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self._squash(self.distr.sample())
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
# Unsquash values (from [low,high] to ]-inf,inf[)
unsquashed_values = tf.cast(self._unsquash(x), self.inputs.dtype)
# Get log prob of unsquashed values from our Normal.
log_prob_gaussian = self.distr.log_prob(unsquashed_values)
# For safety reasons, clamp somehow, only then sum up.
log_prob_gaussian = tf.clip_by_value(log_prob_gaussian, -100, 100)
log_prob_gaussian = tf.reduce_sum(log_prob_gaussian, axis=-1)
# Get log-prob for squashed Gaussian.
unsquashed_values_tanhd = tf.math.tanh(unsquashed_values)
log_prob = log_prob_gaussian - tf.reduce_sum(
tf.math.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER), axis=-1
)
return log_prob
def sample_logp(self):
z = self.distr.sample()
actions = self._squash(z)
return actions, tf.reduce_sum(
self.distr.log_prob(z) - tf.math.log(1 - actions * actions + SMALL_NUMBER),
axis=-1,
)
@override(ActionDistribution)
def entropy(self) -> TensorType:
raise ValueError("Entropy not defined for SquashedGaussian!")
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
raise ValueError("KL not defined for SquashedGaussian!")
def _squash(self, raw_values: TensorType) -> TensorType:
# Returned values are within [low, high] (including `low` and `high`).
squashed = ((tf.math.tanh(raw_values) + 1.0) / 2.0) * (
self.high - self.low
) + self.low
return tf.clip_by_value(squashed, self.low, self.high)
def _unsquash(self, values: TensorType) -> TensorType:
normed_values = (values - self.low) / (self.high - self.low) * 2.0 - 1.0
# Stabilize input to atanh.
save_normed_values = tf.clip_by_value(
normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER
)
unsquashed = tf.math.atanh(save_normed_values)
return unsquashed
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
return np.prod(action_space.shape, dtype=np.int32) * 2
@OldAPIStack
| SquashedGaussian |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 232046,
"end": 250138
} | class ____(TestCase):
def _test_embedding_bag_unpack_impl(self, pack_fn, unpack_fn, bit_rate, optimized_qparams, weights):
data_type = weights.dtype
qtype = torch.quint8
if bit_rate == 8:
w_packed = pack_fn(weights)
else:
w_packed = pack_fn(weights, optimized_qparams=optimized_qparams)
w_unpacked = unpack_fn(w_packed)
if (bit_rate == 8 or bit_rate == 4) and data_type != torch.float16:
# torch.quantize_per_channel does not support float16 yet.
obs_weights = weights
# Combine 3D embeddings (e.g. stacked combination of embeddings)
# in a dimension orthogonal to channels.
if (len(obs_weights.shape) > 2):
stacked_shape = list(weights.size())
stacked_shape[1] *= stacked_shape[0]
obs_weights = weights.reshape(stacked_shape[1:])
# Check numerics of prepack function that accepts qtensor as input.
# We use min-max observer to mimic the quantization performed in the original function.
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(obs_weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
if bit_rate == 4:
qtype = torch.quint4x2
# Quantize the weights to 8bits
qweight = torch.quantize_per_channel(obs_weights, qparams[0], qparams[1], axis=0, dtype=qtype)
real_packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
self.assertEqual(isinstance(real_packed_weight, torch._C.ScriptObject), True)
unpacked_weight = torch.ops.quantized.embedding_bag_unpack(real_packed_weight)
self.assertEqual(unpacked_weight.int_repr().numpy(), qweight.int_repr().numpy())
self.assertEqual(unpacked_weight.q_per_channel_scales(), qweight.q_per_channel_scales())
self.assertEqual(unpacked_weight.q_per_channel_zero_points(), qweight.q_per_channel_zero_points())
def _test_embedding_bag_unpack_fn(self, pack_fn, unpack_fn, num_embeddings, embedding_dim, bit_rate,
optimized_qparams, num_batches, data_type=np.float32):
# when num_batches = 1, it will create a 2D tensor
unsplit_weight = torch.from_numpy((np.random.random_sample((
num_batches, num_embeddings, embedding_dim)).squeeze() + 1).astype(np.float32))
# test unsplit weight (memory format is `contiguous`)
self._test_embedding_bag_unpack_impl(pack_fn, unpack_fn, bit_rate, optimized_qparams, unsplit_weight)
# test split weights (memory format is not `contiguous`)
split_dim = len(unsplit_weight.shape) - 2
split_weights = torch.split(unsplit_weight, 1, dim=split_dim)
for weight in split_weights:
self._test_embedding_bag_unpack_impl(pack_fn, unpack_fn, bit_rate, optimized_qparams, weight)
def embedding_bag_rowwise_offsets_run(
self, bit_rate, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset, fallback_to_no_sparse, sparsity, atol, rtol):
pt_op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_byte_prepack
if bit_rate == 4:
pt_op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_4bit_prepack
elif bit_rate == 2:
pt_op = torch.ops.quantized.embedding_bag_2bit_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_2bit_prepack
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
max_segments = 5
max_segment_length = 20
num_lengths = np.random.randint(1, max_segments + 1)
lengths = np.random.randint(0, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):
"""
Convert lengths to offsets
"""
tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)
tt[1:] = t
tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))
if use_begin_offset:
return tt[:-1]
return tt[1:]
offsets = lengths_to_offsets(lengths)
indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
q_weights = pt_prepack_op(weights)
per_sample_weights = torch.from_numpy(np.random.uniform(
low=0.01, high=0.5, size=[len(indices)]).astype(np.float32)) if \
enable_per_sample_weights else None
if include_last_offset:
offsets = torch.cat(
(offsets, torch.tensor([indices.size(0)], dtype=torch.long)), 0
)
# Reference result will be the floating point torch.nn.EmbeddingBag.
def get_reference_result(
num_embeddings, embedding_dim,
include_last_offset, weights, per_sample_weights,
indices, offsets):
embedding_bag = torch.nn.EmbeddingBag(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
include_last_offset=include_last_offset, _weight=weights,
scale_grad_by_freq=False, mode='sum'
)
return embedding_bag(indices, offsets,
per_sample_weights=per_sample_weights)
mapping_table = np.zeros(num_embeddings, dtype=np.int32)
pruned_weights = weights
prune_weights = sparsity > 0
if prune_weights:
if fallback_to_no_sparse:
# Testing that prune_weight with mapping_table {0} will
# fallback to non sparse embedding look up kernel.
mapping_table = np.zeros(1, dtype=np.int32)
else:
# Prune and generate mapping table
num_compressed_rows = 0
unpruned_ids = []
for i in range(num_embeddings):
if np.random.uniform() < sparsity:
mapping_table[i] = -1
q_weights[i, :] = 0
weights[i, :] = 0
else:
mapping_table[i] = num_compressed_rows
num_compressed_rows += 1
unpruned_ids.append(i)
q_weights = q_weights[unpruned_ids]
pruned_weights = weights[unpruned_ids]
result = pt_op(q_weights,
indices.int() if use_32bit_indices else indices,
offsets.int() if use_32bit_offsets else offsets,
mode=0,
pruned_weights=prune_weights,
per_sample_weights=per_sample_weights,
compressed_indices_mapping=torch.tensor(mapping_table),
include_last_offset=include_last_offset)
reference_result = get_reference_result(
num_embeddings, embedding_dim, include_last_offset, weights,
per_sample_weights, indices, offsets)
torch.testing.assert_close(reference_result, result, atol=atol, rtol=rtol)
if bit_rate == 8 or bit_rate == 4:
# Test operator that accepts TorchBind packed weights.
if bit_rate == 4:
qdtype = torch.quint4x2
op = torch.ops.quantized.embedding_bag_4bit
else:
qdtype = torch.quint8
op = torch.ops.quantized.embedding_bag_byte
obs = PerChannelMinMaxObserver(dtype=qdtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(pruned_weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights to 8bits
qweight = torch.quantize_per_channel(pruned_weights, qparams[0], qparams[1], axis=0, dtype=qdtype)
packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
result = op(packed_weight, indices, offsets, mode=0,
pruned_weights=prune_weights,
per_sample_weights=per_sample_weights,
compressed_indices_mapping=torch.tensor(mapping_table),
include_last_offset=include_last_offset)
torch.testing.assert_close(reference_result, result, atol=atol, rtol=rtol)
""" Tests the correctness of the embedding_bag_8bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_byte(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(
8, num_embeddings, embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights, include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity, atol=0.005, rtol=1e-3)
""" Tests the correctness of the embedding_bag_4bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_4bit(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(4, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity,
atol=0.1, rtol=1e-2)
""" Tests the correctness of the embedding_bag_2bit quantized operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 8 == 0),
num_offsets=st.integers(1, 20),
use_32bit_indices=st.booleans(),
use_32bit_offsets=st.booleans(),
enable_per_sample_weights=st.booleans(),
include_last_offset=st.booleans(),
fallback_to_no_sparse=st.booleans(),
sparsity=st.sampled_from([0.0, 0.5, 0.7]))
def test_embedding_bag_2bit(self, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices,
use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity):
self.embedding_bag_rowwise_offsets_run(2, num_embeddings,
embedding_dim, num_offsets,
use_32bit_indices, use_32bit_offsets,
enable_per_sample_weights,
include_last_offset,
fallback_to_no_sparse,
sparsity=sparsity,
atol=1.0, rtol=1e-1)
""" Tests the correctness of the quantized 8 bit embedding lookup operator """
@given(num_embeddings=st.integers(10, 100),
embedding_dim=st.integers(5, 50).filter(lambda x: x % 4 == 0))
def test_embedding(self, num_embeddings, embedding_dim):
dtypes = [torch.quint8, torch.quint4x2]
quant_ops = [torch.ops.quantized.embedding_byte, torch.ops.quantized.embedding_4bit]
atols = [0.005, 0.1]
rtols = [1e-3, 1e-2]
prepack_op = torch.ops.quantized.embedding_bag_prepack
for quant_op, dtype, atol, rtol in zip(quant_ops, dtypes, atols, rtols):
weights = torch.from_numpy((np.random.random_sample((
num_embeddings, embedding_dim)) + 1).astype(np.float32))
obs = PerChannelMinMaxObserver(dtype=dtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights to 8bits
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=dtype)
max_segments = 5
max_segment_length = 20
num_lengths = np.random.randint(1, max_segments + 1)
lengths = np.random.randint(1, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
indices = torch.from_numpy(np.random.randint(
low=0, high=num_embeddings, size=num_indices, dtype=np.int64))
packed_weight = prepack_op(qweight)
qresult = quant_op(packed_weight, indices, pruned_weights=False)
ref = torch.embedding(weights, indices, padding_idx=-1, scale_grad_by_freq=False, sparse=False)
torch.testing.assert_close(ref, qresult, atol=atol, rtol=rtol)
def test_embedding_2d_indices(self):
"""
Tests the case where 2D indices are passed into the operator
In this case the operator computes the correct offsets argument.
Output shape is dependent on the indices dimension.
"""
quant_op = torch.ops.quantized.embedding_byte
prepack_op = torch.ops.quantized.embedding_bag_prepack
indices = torch.tensor([[9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8], [3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]])
weights = torch.randn(10, 12, dtype=torch.float32)
ref = torch.embedding(weights, indices, padding_idx=-1, scale_grad_by_freq=False, sparse=False)
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
qparams = obs.calculate_qparams()
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=torch.quint8)
packed_weight = prepack_op(qweight)
qresult = quant_op(packed_weight, indices, pruned_weights=False)
torch.testing.assert_close(ref, qresult, atol=0.05, rtol=1e-3)
def test_embedding_bag_2d_indices(self):
"""
Tests the case where 2D indices are passed into the operator
In this case the operator computes the correct offsets argument.
"""
indices = torch.tensor([[9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8], [3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]])
weights = torch.randn(10, 12, dtype=torch.float32)
embedding_bag = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=False, _weight=weights,
scale_grad_by_freq=False, mode='sum'
)
result = embedding_bag(indices)
pt_op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
pt_prepack_op = torch.ops.quantized.embedding_bag_byte_prepack
q_weights = pt_prepack_op(weights)
qresult = pt_op(q_weights, indices, mode=0, pruned_weights=False)
torch.testing.assert_close(result, qresult, atol=0.05, rtol=1e-3)
# Test TorchBind based embedding_bag operator
obs = PerChannelMinMaxObserver(dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0)
obs(weights)
# Get the scale and zero point for the weight tensor
qparams = obs.calculate_qparams()
# Quantize the weights to 8bits
qweight = torch.quantize_per_channel(weights, qparams[0], qparams[1], axis=0, dtype=torch.quint8)
packed_weight = torch.ops.quantized.embedding_bag_prepack(qweight)
qresult = torch.ops.quantized.embedding_bag_byte(packed_weight, indices, mode=0)
torch.testing.assert_close(result, qresult, atol=0.05, rtol=1e-3)
| TestQuantizedEmbeddingOps |
python | PyCQA__pylint | doc/data/messages/n/non-parent-init-called/bad.py | {
"start": 190,
"end": 327
} | class ____(Vertebrate):
def __init__(self):
Animal.__init__(self) # [non-parent-init-called]
self.is_adorable = True
| Cat |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 118128,
"end": 118936
} | class ____(Reduce):
"""Computes the weighted sum over batch size of the given values.
For example, if values is [1, 3, 5, 7] then the metric value is 4.
If the weights were specified as [1, 1, 0, 0] then the value would be 1.
This metric creates two variables, `total` and `count` that are used to
compute the average of `values`. This average is ultimately returned as sum
over batch size which is an idempotent operation that simply divides `total`
by `count`.
If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0
to mask values.
"""
def __init__(self, name='sum_over_batch_size', dtype=None):
super(SumOverBatchSize, self).__init__(
reduction=metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
name=name,
dtype=dtype)
| SumOverBatchSize |
python | boto__boto3 | tests/functional/test_s3.py | {
"start": 18577,
"end": 20143
} | class ____(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(
aws_access_key_id='foo',
aws_secret_access_key='bar',
region_name='us-west-2',
)
self.s3 = self.session.resource('s3')
self.obj_summary = self.s3.ObjectSummary('my_bucket', 'my_key')
self.obj_summary_size = 12
self.stubber = Stubber(self.s3.meta.client)
self.stubber.activate()
self.stubber.add_response(
method='head_object',
service_response={
'ContentLength': self.obj_summary_size,
'ETag': 'my-etag',
'ContentType': 'binary',
},
expected_params={'Bucket': 'my_bucket', 'Key': 'my_key'},
)
def tearDown(self):
self.stubber.deactivate()
def test_has_load(self):
# Validate load was injected onto ObjectSummary.
assert hasattr(self.obj_summary, 'load')
def test_autoloads_correctly(self):
# In HeadObject the parameter returned is ContentLength, this
# should get mapped to Size of ListObject since the resource uses
# the shape returned to by ListObjects.
assert self.obj_summary.size == self.obj_summary_size
def test_cannot_access_other_non_related_parameters(self):
# Even though an HeadObject was used to load this, it should
# only expose the attributes from its shape defined in ListObjects.
assert not hasattr(self.obj_summary, 'content_length')
| TestS3ObjectSummary |
python | sympy__sympy | sympy/concrete/expr_with_limits.py | {
"start": 8522,
"end": 19206
} | class ____(Expr):
__slots__ = ('is_commutative',)
def __new__(cls, function, *symbols, **assumptions):
from sympy.concrete.products import Product
pre = _common_new(cls, function, *symbols,
discrete=issubclass(cls, Product), **assumptions)
if isinstance(pre, tuple):
function, limits, _ = pre
else:
return pre
# limits must have upper and lower bounds; the indefinite form
# is not supported. This restriction does not apply to AddWithLimits
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def kind(self):
return self.function.kind
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the limit variables.
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def bound_symbols(self):
"""Return only variables that are dummy variables.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i, j, k
>>> Integral(x**i, (i, 1, 3), (j, 2), k).bound_symbols
[i, j]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
sympy.integrals.integrals.Integral.transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits if len(l) != 1]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
{y}
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
# mask off non-symbol integration variables that have
# more than themself as a free symbol
reps = {i[0]: i[0] if i[0].free_symbols == {i[0]} else Dummy()
for i in self.limits}
function = function.xreplace(reps)
isyms = function.free_symbols
for xab in limits:
v = reps[xab[0]]
if len(xab) == 1:
isyms.add(v)
continue
# take out the target symbol
if v in isyms:
isyms.remove(v)
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
reps = {v: k for k, v in reps.items()}
return {reps.get(_, _) for _ in isyms}
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def _eval_interval(self, x, a, b):
limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s, n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x, a
>>> Integral(a*x**2, x).subs(x, 4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
if new._diff_wrt:
xab = (new,)
else:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, (AppliedUndef, UndefinedFunction)):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution cannot create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
@property
def has_finite_limits(self):
"""
Returns True if the limits are known to be finite, either by the
explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be infinite, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 1, 8)).has_finite_limits
True
>>> Integral(x, (x, 1, oo)).has_finite_limits
False
>>> M = Symbol('M')
>>> Sum(x, (x, 1, M)).has_finite_limits
>>> N = Symbol('N', integer=True)
>>> Product(x, (x, 1, N)).has_finite_limits
True
See Also
========
has_reversed_limits
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
if any(l.is_infinite for l in lim[1:]):
# Any of the bounds are +/-oo
return False
elif any(l.is_infinite is None for l in lim[1:]):
# Maybe there are assumptions on the variable?
if lim[0].is_infinite is None:
ret_None = True
else:
if lim[0].is_infinite is None:
ret_None = True
if ret_None:
return None
return True
@property
def has_reversed_limits(self):
"""
Returns True if the limits are known to be in reversed order, either
by the explicit bounds, assumptions on the bounds, or assumptions on the
variables. False if known to be in normal order, based on the bounds.
None if not enough information is available to determine.
Examples
========
>>> from sympy import Sum, Integral, Product, oo, Symbol
>>> x = Symbol('x')
>>> Sum(x, (x, 8, 1)).has_reversed_limits
True
>>> Sum(x, (x, 1, oo)).has_reversed_limits
False
>>> M = Symbol('M')
>>> Integral(x, (x, 1, M)).has_reversed_limits
>>> N = Symbol('N', integer=True, positive=True)
>>> Sum(x, (x, 1, N)).has_reversed_limits
False
>>> Product(x, (x, 2, N)).has_reversed_limits
>>> Product(x, (x, 2, N)).subs(N, N + 2).has_reversed_limits
False
See Also
========
sympy.concrete.expr_with_intlimits.ExprWithIntLimits.has_empty_sequence
"""
ret_None = False
for lim in self.limits:
if len(lim) == 3:
var, a, b = lim
dif = b - a
if dif.is_extended_negative:
return True
elif dif.is_extended_nonnegative:
continue
else:
ret_None = True
else:
return None
if ret_None:
return None
return False
| ExprWithLimits |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_memory_tool_20250818_view_command.py | {
"start": 244,
"end": 519
} | class ____(BaseModel):
command: Literal["view"]
"""Command type identifier"""
path: str
"""Path to directory or file to view"""
view_range: Optional[List[int]] = None
"""Optional line range for viewing specific lines"""
| BetaMemoryTool20250818ViewCommand |
python | openai__openai-python | src/openai/types/responses/response_function_shell_tool_call.py | {
"start": 250,
"end": 522
} | class ____(BaseModel):
commands: List[str]
max_output_length: Optional[int] = None
"""Optional maximum number of characters to return from each command."""
timeout_ms: Optional[int] = None
"""Optional timeout in milliseconds for the commands."""
| Action |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1121433,
"end": 1122219
} | class ____(sgqlc.types.Type, Contribution):
"""Represents the contribution a user made by leaving a review on a
pull request.
"""
__schema__ = github_schema
__field_names__ = ("pull_request", "pull_request_review", "repository")
pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest")
"""The pull request the user reviewed."""
pull_request_review = sgqlc.types.Field(sgqlc.types.non_null("PullRequestReview"), graphql_name="pullRequestReview")
"""The review the user left on the pull request."""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The repository containing the pull request that the user reviewed."""
| CreatedPullRequestReviewContribution |
python | kamyu104__LeetCode-Solutions | Python/read-n-characters-given-read4-ii-call-multiple-times.py | {
"start": 410,
"end": 1197
} | class ____(object):
def __init__(self):
self.__buf4 = [''] * 4
self.__i4 = 0
self.__n4 = 0
def read(self, buf, n):
"""
:type buf: Destination buffer (List[str])
:type n: Maximum number of characters to read (int)
:rtype: The number of characters read (int)
"""
i = 0
while i < n:
if self.__i4 < self.__n4: # Any characters in buf4.
buf[i] = self.__buf4[self.__i4]
i += 1
self.__i4 += 1
else:
self.__n4 = read4(self.__buf4) # Read more characters.
if self.__n4:
self.__i4 = 0
else: # Buffer has been empty.
break
return i
| Solution |
python | apache__airflow | providers/apache/kafka/src/airflow/providers/apache/kafka/hooks/base.py | {
"start": 989,
"end": 3716
} | class ____(BaseHook):
"""
A base hook for interacting with Apache Kafka.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
"""
conn_name_attr = "kafka_config_id"
default_conn_name = "kafka_default"
conn_type = "kafka"
hook_name = "Apache Kafka"
def __init__(self, kafka_config_id=default_conn_name, *args, **kwargs):
"""Initialize our Base."""
super().__init__()
self.kafka_config_id = kafka_config_id
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "login", "password", "port", "host"],
"relabeling": {"extra": "Config Dict"},
"placeholders": {
"extra": '{"bootstrap.servers": "localhost:9092", "group.id": "my-group"}',
},
}
def _get_client(self, config) -> Any:
return AdminClient(config)
@cached_property
def get_conn(self) -> Any:
"""Get the configuration object."""
config = self.get_connection(self.kafka_config_id).extra_dejson
if not (config.get("bootstrap.servers", None)):
raise ValueError("config['bootstrap.servers'] must be provided.")
bootstrap_servers = config.get("bootstrap.servers")
if (
bootstrap_servers
and bootstrap_servers.find("cloud.goog") != -1
and bootstrap_servers.find("managedkafka") != -1
):
try:
from airflow.providers.google.cloud.hooks.managed_kafka import ManagedKafkaHook
except ImportError:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(
"Failed to import ManagedKafkaHook. For using this functionality google provider version "
">= 14.1.0 should be pre-installed."
)
self.log.info("Adding token generation for Google Auth to the confluent configuration.")
hook = ManagedKafkaHook()
token = hook.get_confluent_token
config.update({"oauth_cb": token})
return self._get_client(config)
def test_connection(self) -> tuple[bool, str]:
"""Test Connectivity from the UI."""
try:
config = self.get_connection(self.kafka_config_id).extra_dejson
t = AdminClient(config).list_topics(timeout=10)
if t:
return True, "Connection successful."
except Exception as e:
return False, str(e)
return False, "Failed to establish connection."
| KafkaBaseHook |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 55855,
"end": 61150
} | class ____(TestCase):
"""Check behaviour of _order field added by Meta.order_with_respect_to.
The Meta.order_with_respect_to option adds an OrderWrt field named
"_order", where OrderWrt is a proxy class for an IntegerField that sets
some default options.
The simple_history strategy is:
- Convert to a plain IntegerField in the historical record
- When restoring a historical instance, add the old value. This may
result in duplicate ordering values and non-deterministic ordering.
"""
def setUp(self):
"""Create works in published order."""
s = self.series = Series.objects.create(
name="The Chronicles of Narnia", author="C.S. Lewis"
)
self.w_lion = s.works.create(title="The Lion, the Witch and the Wardrobe")
self.w_caspian = s.works.create(title="Prince Caspian")
self.w_voyage = s.works.create(title="The Voyage of the Dawn Treader")
self.w_chair = s.works.create(title="The Silver Chair")
self.w_horse = s.works.create(title="The Horse and His Boy")
self.w_nephew = s.works.create(title="The Magician's Nephew")
self.w_battle = s.works.create(title="The Last Battle")
def test_order(self):
"""Confirm that works are ordered by creation."""
order = self.series.get_serieswork_order()
expected = [
self.w_lion.pk,
self.w_caspian.pk,
self.w_voyage.pk,
self.w_chair.pk,
self.w_horse.pk,
self.w_nephew.pk,
self.w_battle.pk,
]
self.assertSequenceEqual(order, expected)
self.assertEqual(0, self.w_lion._order)
self.assertEqual(1, self.w_caspian._order)
self.assertEqual(2, self.w_voyage._order)
self.assertEqual(3, self.w_chair._order)
self.assertEqual(4, self.w_horse._order)
self.assertEqual(5, self.w_nephew._order)
self.assertEqual(6, self.w_battle._order)
def test_order_field_in_historical_model(self):
work_order_field = self.w_lion._meta.get_field("_order")
self.assertEqual(type(work_order_field), OrderWrt)
history = self.w_lion.history.all()[0]
history_order_field = history._meta.get_field("_order")
self.assertEqual(type(history_order_field), models.IntegerField)
def test_history_object_has_order(self):
history = self.w_lion.history.all()[0]
self.assertEqual(self.w_lion._order, history.history_object._order)
def test_restore_object_with_changed_order(self):
# Change a title
self.w_caspian.title = "Prince Caspian: The Return to Narnia"
self.w_caspian.save()
self.assertEqual(2, len(self.w_caspian.history.all()))
self.assertEqual(1, self.w_caspian._order)
# Switch to internal chronological order
chronological = [
self.w_nephew.pk,
self.w_lion.pk,
self.w_horse.pk,
self.w_caspian.pk,
self.w_voyage.pk,
self.w_chair.pk,
self.w_battle.pk,
]
self.series.set_serieswork_order(chronological)
self.assertSequenceEqual(self.series.get_serieswork_order(), chronological)
# This uses an update, not a save, so no new history is created
w_caspian = SeriesWork.objects.get(id=self.w_caspian.id)
self.assertEqual(2, len(w_caspian.history.all()))
self.assertEqual(1, w_caspian.history.all()[0]._order)
self.assertEqual(1, w_caspian.history.all()[1]._order)
self.assertEqual(3, w_caspian._order)
# Revert to first title, old order
old = w_caspian.history.all()[1].history_object
old.save()
w_caspian = SeriesWork.objects.get(id=self.w_caspian.id)
self.assertEqual(3, len(w_caspian.history.all()))
self.assertEqual(1, w_caspian.history.all()[0]._order)
self.assertEqual(1, w_caspian.history.all()[1]._order)
self.assertEqual(1, w_caspian.history.all()[2]._order)
self.assertEqual(1, w_caspian._order) # The order changed
w_lion = SeriesWork.objects.get(id=self.w_lion.id)
self.assertEqual(1, w_lion._order) # and is identical to another order
# New order is non-deterministic around identical IDs
series = Series.objects.get(id=self.series.id)
order = series.get_serieswork_order()
self.assertEqual(order[0], self.w_nephew.pk)
self.assertTrue(order[1] in (self.w_lion.pk, self.w_caspian.pk))
self.assertTrue(order[2] in (self.w_lion.pk, self.w_caspian.pk))
self.assertEqual(order[3], self.w_horse.pk)
self.assertEqual(order[4], self.w_voyage.pk)
self.assertEqual(order[5], self.w_chair.pk)
self.assertEqual(order[6], self.w_battle.pk)
def test_migrations_include_order(self):
from django.db.migrations import state
model_state = state.ModelState.from_model(SeriesWork.history.model)
found = False
for name, field in model_state.fields.items():
if name == "_order":
found = True
self.assertEqual(type(field), models.IntegerField)
self.assertTrue(found, "_order not in fields " + repr(model_state.fields))
| TestOrderWrtField |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 6895,
"end": 7124
} | class ____(AutoSlugField):
def find_unique(self, model_instance, field, iterator, *args):
self.overrided = True
return super().find_unique(model_instance, field, iterator, *args)
| OverridedFindUniqueAutoSlugField |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/quantization_test.py | {
"start": 3691,
"end": 4829
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Create a graph containing single segment with no quantization ranges."""
def GraphFn(self, x):
return _GraphFn(x, add_quantization_nodes=True)
def GetParams(self):
return _GetParams(self)
def ShouldRunTest(self, run_params):
# Only test FP32/FP16 mode.
return not trt_test.IsQuantizationMode(
run_params.precision_mode), "test non-INT8"
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
# The fake quant ops are not supported in FP32/FP16 mode, and will split the
# graph into two TRT segments.
return ["TRTEngineOp_000", "TRTEngineOp_001"]
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01
if __name__ == "__main__":
test.main()
| NonQuantizedPrecisionsWithRangesTest |
python | readthedocs__readthedocs.org | readthedocs/organizations/querysets.py | {
"start": 502,
"end": 5561
} | class ____(NoReprQuerySet, models.QuerySet):
"""Organizations queryset."""
def for_user(self, user):
# Never list all for membership
return self.filter(
Q(owners__in=[user]) | Q(teams__members__in=[user]),
).distinct()
def for_admin_user(self, user):
return self.filter(owners__in=[user]).distinct()
def api(self, user):
return self.for_user(user)
def created_days_ago(self, days, field="pub_date"):
"""
Filter organizations by creation date.
:param days: Days ago that organization was created
:param field: Field name to use in comparison, default: pub_date
"""
when = timezone.now() - timedelta(days=days)
query_filter = {}
query_filter[field + "__year"] = when.year
query_filter[field + "__month"] = when.month
query_filter[field + "__day"] = when.day
return self.filter(**query_filter)
def subscription_trial_plan_ended(self):
"""
Organizations with subscriptions to Trial Plan ended.
Trial Plan in Stripe has a 30-day trial set up. After that period ends,
the subscription is canceled.
"""
return self.filter(
stripe_subscription__status=SubscriptionStatus.canceled,
stripe_subscription__items__price__id=settings.RTD_ORG_DEFAULT_STRIPE_SUBSCRIPTION_PRICE,
)
def subscription_ended(self, days, exact=False):
"""
Filter organizations which their subscription has ended.
This will return organizations which their subscription has been canceled,
or hasn't been paid for ``days``.
:param days: Days after the subscription has ended
:param exact: Make the ``days`` date to match exactly that day after the
subscription has ended (useful to send emails only once)
"""
date_today = timezone.now().date()
end_date = date_today - timedelta(days=days)
if exact:
# We use ``__date`` here since the field is a DateTimeField
subscription_ended = self.filter(
Q(
stripe_subscription__status=SubscriptionStatus.canceled,
stripe_subscription__ended_at__date=end_date,
)
| Q(
stripe_subscription__status__in=[
SubscriptionStatus.past_due,
SubscriptionStatus.incomplete,
SubscriptionStatus.unpaid,
],
stripe_subscription__latest_invoice__due_date__date=end_date,
stripe_subscription__latest_invoice__status=InvoiceStatus.open,
)
)
else:
subscription_ended = self.filter(
Q(
stripe_subscription__status=SubscriptionStatus.canceled,
stripe_subscription__ended_at__lt=end_date,
)
| Q(
stripe_subscription__status__in=[
SubscriptionStatus.past_due,
SubscriptionStatus.incomplete,
SubscriptionStatus.unpaid,
],
stripe_subscription__latest_invoice__due_date__date__lt=end_date,
stripe_subscription__latest_invoice__status=InvoiceStatus.open,
)
)
return subscription_ended.distinct()
def disable_soon(self, days, exact=False):
"""
Filter organizations that will eventually be marked as disabled.
These are organizations which their subscription has ended,
excluding organizations that can't be disabled, or are already disabled.
:param days: Days after the subscription has ended
:param exact: Make the ``days`` date to match exactly that day after the
subscription has ended (useful to send emails only once)
"""
return (
self.subscription_ended(days=days, exact=exact)
# Exclude organizations that can't be disabled.
.exclude(never_disable=True)
# Exclude organizations that are already disabled
.exclude(disabled=True)
)
def clean_artifacts(self):
"""
Filter organizations which their artifacts can be cleaned up.
These organizations are at least 3*DISABLE_AFTER_DAYS (~3 months) that
are disabled and their artifacts weren't cleaned already. We should be
safe to cleanup all their artifacts at this point.
"""
return self.subscription_ended(days=3 * DISABLE_AFTER_DAYS, exact=False).filter(
disabled=True,
artifacts_cleaned=False,
)
def single_owner(self, user):
"""Returns organizations where `user` is the only owner."""
return self.annotate(count_owners=Count("owners")).filter(
owners=user,
count_owners=1,
)
| BaseOrganizationQuerySet |
python | great-expectations__great_expectations | tests/expectations/fixtures/expect_column_values_to_equal_three.py | {
"start": 857,
"end": 1098
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.equal_three"
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column == 3
| ColumnValuesEqualThree |
python | numba__numba | numba/core/errors.py | {
"start": 20351,
"end": 20561
} | class ____(NumbaError):
"""
Failure during constant inference.
"""
def __init__(self, value, loc=None):
super(ConstantInferenceError, self).__init__(value, loc=loc)
| ConstantInferenceError |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 81768,
"end": 84596
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
client_id: str,
client_secret: str,
refresh_token: str,
auth_type: Optional[str] = None,
region: Optional[str] = None,
report_wait_timeout: Optional[int] = None,
report_generation_max_retries: Optional[int] = None,
start_date: Optional[str] = None,
profiles: Optional[list[int]] = None,
state_filter: Optional[list[str]] = None,
):
"""Airbyte Source for Amazon Ads.
Documentation can be found at https://docs.airbyte.com/integrations/sources/amazon-ads
Args:
name (str): The name of the destination.
client_id (str): The client ID of your Amazon Ads developer application. See the docs for more information.
client_secret (str): The client secret of your Amazon Ads developer application. See the docs for more information.
refresh_token (str): Amazon Ads refresh token. See the docs for more information on how to obtain this token.
region (Optional[str]): Region to pull data from (EU/NA/FE). See docs for more details.
report_wait_timeout (Optional[int]): Timeout duration in minutes for Reports. Default is 60 minutes.
report_generation_max_retries (Optional[int]): Maximum retries Airbyte will attempt for fetching report data. Default is 5.
start_date (Optional[str]): The Start date for collecting reports, should not be more than 60 days in the past. In YYYY-MM-DD format
profiles (Optional[List[int]]): Profile IDs you want to fetch data for. See docs for more details.
state_filter (Optional[List[str]]): Reflects the state of the Display, Product, and Brand Campaign streams as enabled, paused, or archived. If you do not populate this field, it will be ignored completely.
"""
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.region = check.opt_str_param(region, "region")
self.report_wait_timeout = check.opt_int_param(report_wait_timeout, "report_wait_timeout")
self.report_generation_max_retries = check.opt_int_param(
report_generation_max_retries, "report_generation_max_retries"
)
self.start_date = check.opt_str_param(start_date, "start_date")
self.profiles = check.opt_nullable_list_param(profiles, "profiles", int)
self.state_filter = check.opt_nullable_list_param(state_filter, "state_filter", str)
super().__init__("Amazon Ads", name)
| AmazonAdsSource |
python | django__django | django/contrib/auth/context_processors.py | {
"start": 724,
"end": 1911
} | class ____:
def __init__(self, user):
self.user = user
def __repr__(self):
return f"{self.__class__.__qualname__}({self.user!r})"
def __getitem__(self, app_label):
return PermLookupDict(self.user, app_label)
def __iter__(self):
# I am large, I contain multitudes.
raise TypeError("PermWrapper is not iterable.")
def __contains__(self, perm_name):
"""
Lookup by "someapp" or "someapp.someperm" in perms.
"""
if "." not in perm_name:
# The name refers to module.
return bool(self[perm_name])
app_label, perm_name = perm_name.split(".", 1)
return self[app_label][perm_name]
def auth(request):
"""
Return context variables required by apps that use Django's authentication
system.
If there is no 'user' attribute in the request, use AnonymousUser (from
django.contrib.auth).
"""
if hasattr(request, "user"):
user = request.user
else:
from django.contrib.auth.models import AnonymousUser
user = AnonymousUser()
return {
"user": user,
"perms": PermWrapper(user),
}
| PermWrapper |
python | pytorch__pytorch | torch/export/unflatten.py | {
"start": 4474,
"end": 7891
} | class ____(_SubmoduleBase, torch.nn.Module):
"""A module that uses torch.fx.Interpreter to execute instead of the usual
codegen that GraphModule uses. This provides better stack trace information
and makes it easier to debug execution.
"""
graph_module: Optional[torch.fx.GraphModule]
def __init__(
self,
graph: torch.fx.Graph,
ty: Optional[str] = None,
):
super().__init__()
self.graph = graph
self._ty = ty
self.graph.owning_module = self # type: ignore[assignment]
self._run_with_interpreter = RUN_WITH_INTERPRETER
def forward(self, *args, **kwargs):
assert self.graph_module is not None, "Didn't finalize this InterpreterModule"
if not is_fx_symbolic_tracing() and (
torch.compiler.is_dynamo_compiling() or not self._run_with_interpreter
):
# Dynamo cannot trace through torch.fx.Interpreter, so fall back to
# GraphModule codegen in this instance.
# Patch the codegened forward to run with this InterpreterModule,
# so attribute accesses, etc. are on this module instead.
return type(self.graph_module).forward(self, *args, **kwargs)
else:
if kwargs:
# Handle **kwargs. FX only natively supports positional
# arguments (through placeholders). So in order to pass in
# kwargs, we must correspond the names of the placeholders with
# the keys in the kwarg dict.
arg_list = list(args)
kwarg_names = self.arg_names[len(arg_list) :]
arg_list.extend(
kwargs[kwarg_name]
for kwarg_name in kwarg_names
if kwarg_name in kwargs
)
# Assert that the kwargs passed in exactly match the positional
# arguments specified by the GraphModule. This should be
# guaranteed by the unflattening process.
assert len(kwarg_names) == len(kwargs)
assert len(arg_list) == len(self.arg_names)
args = tuple(arg_list)
return torch.fx.Interpreter(self, graph=self.graph).run(
*args, enable_io_processing=False
)
def finalize(self):
# We need to "finalize" because GraphModule populates its own state_dict
# based on the get_attrs observed in the graph. So we need to fully
# construct the graph and call _sink_params before generating this
# GraphModule.
# need to set `graph_module` directly on the dict to avoid it getting
# registered as a submodule.
self.__dict__["graph_module"] = torch.fx.GraphModule(self, self.graph)
self.graph.lint()
# Cache arg names for kwarg handling (see forward())
self.arg_names = []
for node in self.graph.nodes:
if node.op == "placeholder":
self.arg_names.append(node.target)
def print_readable(
self,
print_output=True,
include_stride=False,
include_device=False,
colored=False,
):
return _print_readable(
self,
"InterpreterModule",
print_output,
include_stride,
include_device,
colored,
)
| InterpreterModule |
python | scipy__scipy | scipy/optimize/tests/test_least_squares.py | {
"start": 30209,
"end": 38297
} | class ____(BaseMixin):
method = 'lm'
def test_bounds_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial,
2.0, bounds=(-3.0, 3.0), method='lm')
def test_m_less_n_not_supported(self):
x0 = [-2, 1]
assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
method='lm')
def test_sparse_not_supported(self):
p = BroydenTridiagonal()
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_jac_sparsity_not_supported(self):
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
jac_sparsity=[1], method='lm')
def test_LinearOperator_not_supported(self):
p = BroydenTridiagonal(mode="operator")
assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
method='lm')
def test_loss(self):
res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
assert_allclose(res.x, 0.0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0,
method='lm', loss='huber')
def test_callback_with_lm_method(self):
def callback(x):
assert(False) # Dummy callback function
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"Callback function specified, but not supported with `lm` method.",
UserWarning,
)
least_squares(fun_trivial, x0=[0], method='lm', callback=callback)
def test_basic():
# test that 'method' arg is really optional
res = least_squares(fun_trivial, 2.0)
assert_allclose(res.x, 0, atol=1e-10)
def test_callback():
# test that callback function works as expected
results = []
def my_callback_optimresult(intermediate_result: OptimizeResult):
results.append(intermediate_result)
def my_callback_x(x):
r = OptimizeResult()
r.nit = 1
r.x = x
results.append(r)
return False
def my_callback_optimresult_stop_exception(
intermediate_result: OptimizeResult):
results.append(intermediate_result)
raise StopIteration
def my_callback_x_stop_exception(x):
r = OptimizeResult()
r.nit = 1
r.x = x
results.append(r)
raise StopIteration
# Try for different function signatures and stop methods
callbacks_nostop = [my_callback_optimresult, my_callback_x]
callbacks_stop = [my_callback_optimresult_stop_exception,
my_callback_x_stop_exception]
# Try for all the implemented methods: trf, trf_bounds and dogbox
calls = [
lambda callback: least_squares(fun_trivial, 5.0, method='trf',
callback=callback),
lambda callback: least_squares(fun_trivial, 5.0, method='trf',
bounds=(-8.0, 8.0), callback=callback),
lambda callback: least_squares(fun_trivial, 5.0, method='dogbox',
callback=callback)
]
for mycallback, call in product(callbacks_nostop, calls):
results.clear()
# Call the different implemented methods
res = call(mycallback)
# Check that callback was called
assert len(results) > 0
# Check that results data makes sense
assert results[-1].nit > 0
# Check that it didn't stop because of the callback
assert res.status != -2
# final callback x should be same as final result
assert_allclose(results[-1].x, res.x)
for mycallback, call in product(callbacks_stop, calls):
results.clear()
# Call the different implemented methods
res = call(mycallback)
# Check that callback was called
assert len(results) > 0
# Check that only one iteration was run
assert results[-1].nit == 1
# Check that it stopped because of the callback
assert res.status == -2
def test_small_tolerances_for_lm():
for ftol, xtol, gtol in [(None, 1e-13, 1e-13),
(1e-13, None, 1e-13),
(1e-13, 1e-13, None)]:
assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
ftol=ftol, gtol=gtol, method='lm')
def test_fp32_gh12991():
# checks that smaller FP sizes can be used in least_squares
# this is the minimum working example reported for gh12991
rng = np.random.default_rng(1978)
x = np.linspace(0, 1, 100, dtype=np.float32)
y = rng.random(size=100, dtype=np.float32)
# changed in gh21872. These functions should've been working in fp32 to force
# approx_derivative to work in fp32. One of the initial steps in least_squares
# is to force x0 (p) to be a float, meaning that the output of func and err would
# be in float64, unless forced to be in float32
def func(p, x):
return (p[0] + p[1] * x).astype(np.float32)
def err(p, x, y):
return (func(p, x) - y).astype(np.float32)
def mse(p, x, y):
return np.sum(err(p, x, y)**2)
res = least_squares(err, [-1.0, -1.0], args=(x, y))
# previously the initial jacobian calculated for this would be all 0
# and the minimize would terminate immediately, with nfev=1, would
# report a successful minimization (it shouldn't have done), but be
# unchanged from the initial solution.
# It was terminating early because the underlying approx_derivative
# used a step size for FP64 when the working space was FP32.
assert res.nfev > 2
# compare output to solver that doesn't use derivatives
res2 = minimize(
mse,
[-1.0, 1.0],
method='cobyqa',
args=(x, y),
options={'final_tr_radius': 1e-6}
)
assert_allclose(res.x, res2.x, atol=9e-5)
def test_gh_18793_and_19351():
answer = 1e-12
initial_guess = 1.1e-12
def chi2(x):
return (x-answer)**2
gtol = 1e-15
res = least_squares(chi2, x0=initial_guess, gtol=1e-15, bounds=(0, np.inf))
# Original motivation: gh-18793
# if we choose an initial condition that is close to the solution
# we shouldn't return an answer that is further away from the solution
# Update: gh-19351
# However this requirement does not go well with 'trf' algorithm logic.
# Some regressions were reported after the presumed fix.
# The returned solution is good as long as it satisfies the convergence
# conditions.
# Specifically in this case the scaled gradient will be sufficiently low.
scaling, _ = CL_scaling_vector(res.x, res.grad,
np.atleast_1d(0), np.atleast_1d(np.inf))
assert res.status == 1 # Converged by gradient
assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol
def test_gh_19103():
# Checks that least_squares trf method selects a strictly feasible point,
# and thus succeeds instead of failing,
# when the initial guess is reported exactly at a boundary point.
# This is a reduced example from gh191303
ydata = np.array([0.] * 66 + [
1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1.,
1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1.,
0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4.,
6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11.,
10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29.,
28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52.,
67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107.,
108., 118., 96., 115., 138., 137.,
])
xdata = np.arange(0, ydata.size) * 0.1
def exponential_wrapped(params):
A, B, x0 = params
return A * np.exp(B * (xdata - x0)) - ydata
x0 = [0.01, 1., 5.]
bounds = ((0.01, 0, 0), (np.inf, 10, 20.9))
res = least_squares(exponential_wrapped, x0, method='trf', bounds=bounds)
assert res.success
| TestLM |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/horizontal_auto_width.py | {
"start": 122,
"end": 652
} | class ____(App):
"""
Checks that the auto width of the parent Horizontal is correct.
"""
def compose(self) -> ComposeResult:
yield Horizontal(
Static("Docked left 1", id="dock-1"),
Static("Docked left 2", id="dock-2"),
Static("Widget 1", classes="widget"),
Static("Widget 2", classes="widget"),
id="horizontal",
)
app = HorizontalAutoWidth(css_path="horizontal_auto_width.tcss")
if __name__ == "__main__":
app.run()
| HorizontalAutoWidth |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 7791,
"end": 8026
} | class ____(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
| AllType |
python | pydantic__pydantic | pydantic/v1/fields.py | {
"start": 2333,
"end": 13552
} | class ____(Representation):
"""
Captures extra information about a field.
"""
__slots__ = (
'default',
'default_factory',
'alias',
'alias_priority',
'title',
'description',
'exclude',
'include',
'const',
'gt',
'ge',
'lt',
'le',
'multiple_of',
'allow_inf_nan',
'max_digits',
'decimal_places',
'min_items',
'max_items',
'unique_items',
'min_length',
'max_length',
'allow_mutation',
'repr',
'regex',
'discriminator',
'extra',
)
# field constraints with the default value, it's also used in update_from_config below
__field_constraints__ = {
'min_length': None,
'max_length': None,
'regex': None,
'gt': None,
'lt': None,
'ge': None,
'le': None,
'multiple_of': None,
'allow_inf_nan': None,
'max_digits': None,
'decimal_places': None,
'min_items': None,
'max_items': None,
'unique_items': None,
'allow_mutation': True,
}
def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
self.default = default
self.default_factory = kwargs.pop('default_factory', None)
self.alias = kwargs.pop('alias', None)
self.alias_priority = kwargs.pop('alias_priority', 2 if self.alias is not None else None)
self.title = kwargs.pop('title', None)
self.description = kwargs.pop('description', None)
self.exclude = kwargs.pop('exclude', None)
self.include = kwargs.pop('include', None)
self.const = kwargs.pop('const', None)
self.gt = kwargs.pop('gt', None)
self.ge = kwargs.pop('ge', None)
self.lt = kwargs.pop('lt', None)
self.le = kwargs.pop('le', None)
self.multiple_of = kwargs.pop('multiple_of', None)
self.allow_inf_nan = kwargs.pop('allow_inf_nan', None)
self.max_digits = kwargs.pop('max_digits', None)
self.decimal_places = kwargs.pop('decimal_places', None)
self.min_items = kwargs.pop('min_items', None)
self.max_items = kwargs.pop('max_items', None)
self.unique_items = kwargs.pop('unique_items', None)
self.min_length = kwargs.pop('min_length', None)
self.max_length = kwargs.pop('max_length', None)
self.allow_mutation = kwargs.pop('allow_mutation', True)
self.regex = kwargs.pop('regex', None)
self.discriminator = kwargs.pop('discriminator', None)
self.repr = kwargs.pop('repr', True)
self.extra = kwargs
def __repr_args__(self) -> 'ReprArgs':
field_defaults_to_hide: Dict[str, Any] = {
'repr': True,
**self.__field_constraints__,
}
attrs = ((s, getattr(self, s)) for s in self.__slots__)
return [(a, v) for a, v in attrs if v != field_defaults_to_hide.get(a, None)]
def get_constraints(self) -> Set[str]:
"""
Gets the constraints set on the field by comparing the constraint value with its default value
:return: the constraints set on field_info
"""
return {attr for attr, default in self.__field_constraints__.items() if getattr(self, attr) != default}
def update_from_config(self, from_config: Dict[str, Any]) -> None:
"""
Update this FieldInfo based on a dict from get_field_info, only fields which have not been set are dated.
"""
for attr_name, value in from_config.items():
try:
current_value = getattr(self, attr_name)
except AttributeError:
# attr_name is not an attribute of FieldInfo, it should therefore be added to extra
# (except if extra already has this value!)
self.extra.setdefault(attr_name, value)
else:
if current_value is self.__field_constraints__.get(attr_name, None):
setattr(self, attr_name, value)
elif attr_name == 'exclude':
self.exclude = ValueItems.merge(value, current_value)
elif attr_name == 'include':
self.include = ValueItems.merge(value, current_value, intersect=True)
def _validate(self) -> None:
if self.default is not Undefined and self.default_factory is not None:
raise ValueError('cannot specify both default and default_factory')
def Field(
default: Any = Undefined,
*,
default_factory: Optional[NoArgAnyCallable] = None,
alias: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny', Any]] = None,
include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny', Any]] = None,
const: Optional[bool] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
multiple_of: Optional[float] = None,
allow_inf_nan: Optional[bool] = None,
max_digits: Optional[int] = None,
decimal_places: Optional[int] = None,
min_items: Optional[int] = None,
max_items: Optional[int] = None,
unique_items: Optional[bool] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
allow_mutation: bool = True,
regex: Optional[str] = None,
discriminator: Optional[str] = None,
repr: bool = True,
**extra: Any,
) -> Any:
"""
Used to provide extra information about a field, either for the model schema or complex validation. Some arguments
apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``.
:param default: since this is replacing the field’s default, its first argument is used
to set the default, use ellipsis (``...``) to indicate the field is required
:param default_factory: callable that will be called when a default value is needed for this field
If both `default` and `default_factory` are set, an error is raised.
:param alias: the public name of the field
:param title: can be any string, used in the schema
:param description: can be any string, used in the schema
:param exclude: exclude this field while dumping.
Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method.
:param include: include this field while dumping.
Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method.
:param const: this field is required and *must* take it's default value
:param gt: only applies to numbers, requires the field to be "greater than". The schema
will have an ``exclusiveMinimum`` validation keyword
:param ge: only applies to numbers, requires the field to be "greater than or equal to". The
schema will have a ``minimum`` validation keyword
:param lt: only applies to numbers, requires the field to be "less than". The schema
will have an ``exclusiveMaximum`` validation keyword
:param le: only applies to numbers, requires the field to be "less than or equal to". The
schema will have a ``maximum`` validation keyword
:param multiple_of: only applies to numbers, requires the field to be "a multiple of". The
schema will have a ``multipleOf`` validation keyword
:param allow_inf_nan: only applies to numbers, allows the field to be NaN or infinity (+inf or -inf),
which is a valid Python float. Default True, set to False for compatibility with JSON.
:param max_digits: only applies to Decimals, requires the field to have a maximum number
of digits within the decimal. It does not include a zero before the decimal point or trailing decimal zeroes.
:param decimal_places: only applies to Decimals, requires the field to have at most a number of decimal places
allowed. It does not include trailing decimal zeroes.
:param min_items: only applies to lists, requires the field to have a minimum number of
elements. The schema will have a ``minItems`` validation keyword
:param max_items: only applies to lists, requires the field to have a maximum number of
elements. The schema will have a ``maxItems`` validation keyword
:param unique_items: only applies to lists, requires the field not to have duplicated
elements. The schema will have a ``uniqueItems`` validation keyword
:param min_length: only applies to strings, requires the field to have a minimum length. The
schema will have a ``minLength`` validation keyword
:param max_length: only applies to strings, requires the field to have a maximum length. The
schema will have a ``maxLength`` validation keyword
:param allow_mutation: a boolean which defaults to True. When False, the field raises a TypeError if the field is
assigned on an instance. The BaseModel Config must set validate_assignment to True
:param regex: only applies to strings, requires the field match against a regular expression
pattern string. The schema will have a ``pattern`` validation keyword
:param discriminator: only useful with a (discriminated a.k.a. tagged) `Union` of sub models with a common field.
The `discriminator` is the name of this common field to shorten validation and improve generated schema
:param repr: show this field in the representation
:param **extra: any additional keyword arguments will be added as is to the schema
"""
field_info = FieldInfo(
default,
default_factory=default_factory,
alias=alias,
title=title,
description=description,
exclude=exclude,
include=include,
const=const,
gt=gt,
ge=ge,
lt=lt,
le=le,
multiple_of=multiple_of,
allow_inf_nan=allow_inf_nan,
max_digits=max_digits,
decimal_places=decimal_places,
min_items=min_items,
max_items=max_items,
unique_items=unique_items,
min_length=min_length,
max_length=max_length,
allow_mutation=allow_mutation,
regex=regex,
discriminator=discriminator,
repr=repr,
**extra,
)
field_info._validate()
return field_info
# used to be an enum but changed to int's for small performance improvement as less access overhead
SHAPE_SINGLETON = 1
SHAPE_LIST = 2
SHAPE_SET = 3
SHAPE_MAPPING = 4
SHAPE_TUPLE = 5
SHAPE_TUPLE_ELLIPSIS = 6
SHAPE_SEQUENCE = 7
SHAPE_FROZENSET = 8
SHAPE_ITERABLE = 9
SHAPE_GENERIC = 10
SHAPE_DEQUE = 11
SHAPE_DICT = 12
SHAPE_DEFAULTDICT = 13
SHAPE_COUNTER = 14
SHAPE_NAME_LOOKUP = {
SHAPE_LIST: 'List[{}]',
SHAPE_SET: 'Set[{}]',
SHAPE_TUPLE_ELLIPSIS: 'Tuple[{}, ...]',
SHAPE_SEQUENCE: 'Sequence[{}]',
SHAPE_FROZENSET: 'FrozenSet[{}]',
SHAPE_ITERABLE: 'Iterable[{}]',
SHAPE_DEQUE: 'Deque[{}]',
SHAPE_DICT: 'Dict[{}]',
SHAPE_DEFAULTDICT: 'DefaultDict[{}]',
SHAPE_COUNTER: 'Counter[{}]',
}
MAPPING_LIKE_SHAPES: Set[int] = {SHAPE_DEFAULTDICT, SHAPE_DICT, SHAPE_MAPPING, SHAPE_COUNTER}
| FieldInfo |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_health_checks.py | {
"start": 4907,
"end": 7382
} | class ____(RuleBasedStateMachine):
_ = rule()(lambda self: None)
@invariant(check_during_init=True)
def r(self):
return "any non-None value"
@pytest.mark.parametrize(
"cls", [ReturningRuleMachine, ReturningInitializeMachine, ReturningInvariantMachine]
)
def test_stateful_returnvalue_healthcheck(cls):
with pytest.raises(FailedHealthCheck):
run_state_machine_as_test(cls, settings=settings())
def test_nested_given_raises_healthcheck():
@given(st.integers())
def f(n1):
@given(st.integers())
def g(n2):
pass
g()
with pytest.raises(FailedHealthCheck):
f()
def test_triply_nested_given_raises_healthcheck():
@given(st.integers())
@settings(max_examples=10)
def f(n1):
@given(st.integers())
@settings(max_examples=10)
def g(n2):
@given(st.integers())
@settings(max_examples=10)
def h(n3):
pass
h()
g()
with pytest.raises(FailedHealthCheck):
f()
@xfail_on_crosshair(Why.nested_given)
def test_can_suppress_nested_given():
@given(st.integers())
@settings(suppress_health_check=[HealthCheck.nested_given], max_examples=5)
def f(n1):
@given(st.integers())
@settings(max_examples=5)
def g(n2):
pass
g()
f()
def test_cant_suppress_nested_given_on_inner():
# nested_given has to be suppressed at the function right above the nesting.
# this isn't a principled design choice, but a limitation of how we access
# the current settings.
@given(st.integers())
@settings(max_examples=5)
def f(n1):
@given(st.integers())
@settings(suppress_health_check=[HealthCheck.nested_given], max_examples=5)
def g(n2):
pass
g()
with pytest.raises(FailedHealthCheck):
f()
@xfail_on_crosshair(Why.nested_given)
def test_suppress_triply_nested_given():
# both suppressions are necessary here
@given(st.integers())
@settings(suppress_health_check=[HealthCheck.nested_given], max_examples=5)
def f(n1):
@given(st.integers())
@settings(suppress_health_check=[HealthCheck.nested_given], max_examples=5)
def g(n2):
@given(st.integers())
@settings(max_examples=5)
def h(n3):
pass
h()
g()
f()
| ReturningInvariantMachine |
python | pypa__pip | docs/pip_sphinxext.py | {
"start": 6571,
"end": 6849
} | class ____(PipOptions):
required_arguments = 1
def process_options(self) -> None:
cmd = create_command(self.arguments[0])
self._format_options(
cmd.parser.option_groups[0].option_list,
cmd_name=cmd.name,
)
| PipCommandOptions |
python | redis__redis-py | redis/commands/search/reducers.py | {
"start": 310,
"end": 476
} | class ____(Reducer):
"""
Counts the number of results in the group
"""
NAME = "COUNT"
def __init__(self) -> None:
super().__init__()
| count |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 3946,
"end": 5462
} | class ____(GeneratedAirbyteSource):
class CentralAPIRouter:
@public
def __init__(self, user_name: str, user_secret: str, site_id: str):
self.auth_type = "CENTRAL_API_ROUTER"
self.user_name = check.str_param(user_name, "user_name")
self.user_secret = check.str_param(user_secret, "user_secret")
self.site_id = check.str_param(site_id, "site_id")
class SingleStoreAccessToken:
@public
def __init__(self, access_token: str, store_name: str):
self.auth_type = "SINGLE_STORE_ACCESS_TOKEN"
self.access_token = check.str_param(access_token, "access_token")
self.store_name = check.str_param(store_name, "store_name")
@public
def __init__(
self,
name: str,
credentials: Union["CartSource.CentralAPIRouter", "CartSource.SingleStoreAccessToken"],
start_date: str,
):
"""Airbyte Source for Cart.
Documentation can be found at https://docs.airbyte.com/integrations/sources/cart
Args:
name (str): The name of the destination.
start_date (str): The date from which you'd like to replicate the data
"""
self.credentials = check.inst_param(
credentials,
"credentials",
(CartSource.CentralAPIRouter, CartSource.SingleStoreAccessToken),
)
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Cart", name)
| CartSource |
python | ray-project__ray | python/ray/serve/tests/unit/test_task_consumer.py | {
"start": 3618,
"end": 8930
} | class ____:
"""Test the task_consumer decorator."""
def _verify_and_cleanup(self, instance, expected_calls=None):
"""Verify consumer and cleanup instance."""
instance.initialize_callable(5)
adapter = instance._adapter
assert adapter._start_consumer_received
if expected_calls is not None:
if expected_calls:
calls = [call(method, name=name) for method, name in expected_calls]
adapter.register_task_handle_mock.assert_has_calls(
calls, any_order=False
)
assert adapter.register_task_handle_mock.call_count == len(
expected_calls
)
else:
adapter.register_task_handle_mock.assert_not_called()
del instance
def _run_consumer_test(
self, config, consumer_class_factory, expected_calls_factory=None
):
"""Run a consumer test with factory functions."""
consumer_class = consumer_class_factory(config)
instance = consumer_class()
expected_calls = (
expected_calls_factory(instance) if expected_calls_factory else None
)
self._verify_and_cleanup(instance, expected_calls)
def test_task_consumer_basic(self, config):
"""Test basic functionality of the task_consumer decorator."""
def make_consumer(cfg):
@task_consumer(task_processor_config=cfg)
class MyConsumer:
@task_handler
def my_task(self):
pass
return MyConsumer
self._run_consumer_test(
config, make_consumer, lambda inst: [(inst.my_task, "my_task")]
)
def test_task_consumer_multiple_handlers(self, config):
"""Test with multiple task handlers."""
def make_consumer(cfg):
@task_consumer(task_processor_config=cfg)
class MyConsumer:
@task_handler
def task1(self):
pass
@task_handler
def task2(self):
pass
return MyConsumer
self._run_consumer_test(
config,
make_consumer,
lambda inst: [(inst.task1, "task1"), (inst.task2, "task2")],
)
def test_task_consumer_custom_names(self, config):
"""Test task handlers with and without custom names."""
def make_consumer(cfg):
@task_consumer(task_processor_config=cfg)
class MyConsumer:
@task_handler(name="custom_task")
def task1(self):
pass
@task_handler
def task2(self):
pass
return MyConsumer
self._run_consumer_test(
config,
make_consumer,
lambda inst: [(inst.task1, "custom_task"), (inst.task2, "task2")],
)
def test_task_consumer_init_args(self, config):
"""Test that __init__ arguments are passed correctly."""
@task_consumer(task_processor_config=config)
class MyConsumer:
def __init__(self, value):
self.value = value
instance = MyConsumer(value=42)
assert instance.value == 42
self._verify_and_cleanup(instance)
def test_task_consumer_no_handlers(self, config):
"""Test with a class that has no task handlers."""
def make_consumer(cfg):
@task_consumer(task_processor_config=cfg)
class MyConsumer:
def some_method(self):
pass
return MyConsumer
self._run_consumer_test(config, make_consumer, lambda inst: [])
def test_task_consumer_inheritance(self, config):
"""Test that inherited task handlers are registered."""
def make_consumer(cfg):
class BaseConsumer:
@task_handler
def base_task(self):
pass
@task_consumer(task_processor_config=cfg)
class DerivedConsumer(BaseConsumer):
@task_handler
def derived_task(self):
pass
return DerivedConsumer
self._run_consumer_test(
config,
make_consumer,
lambda inst: [
(inst.base_task, "base_task"),
(inst.derived_task, "derived_task"),
],
)
def test_task_consumer_no_args_decorator(self):
"""Test using @task_consumer without arguments raises TypeError."""
with pytest.raises(TypeError):
@task_consumer
class MyConsumer:
pass
def test_default_deployment_name_stays_same_with_task_consumer(config):
"""Test that the default deployment name is the class name when using task_consumer with serve.deployment."""
@deployment
@task_consumer(task_processor_config=config)
class MyTaskConsumer:
@task_handler
def my_task(self):
pass
# The deployment name should default to the class name
assert MyTaskConsumer.name == "MyTaskConsumer"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestTaskConsumerDecorator |
python | huggingface__transformers | src/transformers/models/bertweet/tokenization_bertweet.py | {
"start": 1370,
"end": 21199
} | class ____(PreTrainedTokenizer):
"""
Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
normalization (`bool`, *optional*, defaults to `False`):
Whether or not to apply a normalization preprocess.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file,
merges_file,
normalization=False,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs,
):
try:
from emoji import demojize
self.demojizer = demojize
except ImportError:
logger.warning(
"emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
" install emoji==0.6.0"
)
self.demojizer = None
self.vocab_file = vocab_file
self.merges_file = merges_file
self.encoder = {}
self.encoder[str(bos_token)] = 0
self.encoder[str(pad_token)] = 1
self.encoder[str(eos_token)] = 2
self.encoder[str(unk_token)] = 3
self.add_from_file(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:-1]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.normalization = normalization
self.tweetPreprocessor = TweetTokenizer()
self.special_puncts = {"’": "'", "…": "..."}
super().__init__(
normalization=normalization,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
# Configure patterns instead of overriding methods
token_type_ids_pattern="all_zeros", # BERTweet doesn't use token type IDs
token_type_ids_include_special_tokens=True,
special_tokens_pattern="cls_double_sep", # <s> X </s></s> Y </s>
**kwargs,
)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = "@@ ".join(word)
word = word[:-4]
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
if self.normalization: # Perform Tweet normalization before performing BPE
text = self.normalizeTweet(text)
split_tokens = []
words = re.findall(r"\S+\n?", text)
for token in words:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def normalizeTweet(self, tweet):
"""
Normalize a raw Tweet
"""
for punct in self.special_puncts:
tweet = tweet.replace(punct, self.special_puncts[punct])
tokens = self.tweetPreprocessor.tokenize(tweet)
normTweet = " ".join([self.normalizeToken(token) for token in tokens])
normTweet = (
normTweet.replace("cannot ", "can not ")
.replace("n't ", " n't ")
.replace("n 't ", " n't ")
.replace("ca n't", "can't")
.replace("ai n't", "ain't")
)
normTweet = (
normTweet.replace("'m ", " 'm ")
.replace("'re ", " 're ")
.replace("'s ", " 's ")
.replace("'ll ", " 'll ")
.replace("'d ", " 'd ")
.replace("'ve ", " 've ")
)
normTweet = (
normTweet.replace(" p . m .", " p.m.")
.replace(" p . m ", " p.m ")
.replace(" a . m .", " a.m.")
.replace(" a . m ", " a.m ")
)
return " ".join(normTweet.split())
def normalizeToken(self, token):
"""
Normalize tokens in a Tweet
"""
lowercased_token = token.lower()
if token.startswith("@"):
return "@USER"
elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
return "HTTPURL"
elif len(token) == 1:
if token in self.special_puncts:
return self.special_puncts[token]
if self.demojizer is not None:
return self.demojizer(token)
else:
return token
else:
return token
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace("@@ ", "").strip()
return out_string
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str, ...]:
"""
Save the vocabulary and merges files to a directory.
"""
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return ()
vocab_files_names = getattr(self, "vocab_files_names", {})
prefix = f"{filename_prefix}-" if filename_prefix else ""
# Save vocabulary in the format expected by add_from_file: <token> <id>
# Exclude special tokens (IDs 0-3) as they are added in __init__ before add_from_file
vocab_file = os.path.join(save_directory, prefix + vocab_files_names.get("vocab_file", "vocab.txt"))
with open(vocab_file, "w", encoding="utf-8") as f:
for token, token_id in sorted(self.encoder.items(), key=lambda kv: kv[1]):
# Only save tokens with ID >= 4, as IDs 0-3 are reserved for special tokens
if token_id >= 4:
f.write(f"{token} {token_id}\n")
# Save BPE merges
merge_file = os.path.join(save_directory, prefix + vocab_files_names.get("merges_file", "bpe.codes"))
with open(merge_file, "w", encoding="utf-8") as writer:
writer.writelines(
" ".join(bpe_tokens) + "\n"
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1])
)
return (vocab_file, merge_file)
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
"""
if isinstance(f, str):
try:
with open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
word = line[:idx]
self.encoder[word] = len(self.encoder)
# Natural Language Toolkit: Twitter Tokenizer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Christopher Potts <cgpotts@stanford.edu>
# Ewan Klein <ewan@inf.ed.ac.uk> (modifications)
# Pierpaolo Pantone <> (modifications)
# URL: http://nltk.org/
# For license information, see LICENSE.TXT
#
"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
1. The tuple regex_strings defines a list of regular expression strings.
2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
the class Tokenizer.
4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
is set to False, then the tokenizer will lowercase everything except for emoticons.
"""
######################################################################
#
# import regex # https://github.com/nltk/nltk/issues/2409
# import html
#
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
# This particular element is used in a couple ways, so we define it
# with a name:
# docstyle-ignore
EMOTICONS = r"""
(?:
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
|
<3 # heart
)"""
# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
# docstyle-ignore
URLS = r""" # Capture 1: entire matched URL
(?:
https?: # URL protocol and colon
(?:
/{1,3} # 1-3 slashes
| # or
[a-z0-9%] # Single letter or digit or '%'
# (Trying not to match e.g. "URI::Escape")
)
| # or
# looks like domain name followed by a slash:
[a-z0-9.\-]+[.]
(?:[a-z]{2,13})
/
)
(?: # One or more:
[^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
| # or
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
)+
(?: # End with:
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
| # or
[^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
)
| # OR, the following to match naked domains:
(?:
(?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
[a-z0-9]+
(?:[.\-][a-z0-9]+)*
[.]
(?:[a-z]{2,13})
\b
/?
(?!@) # not succeeded by a @,
# avoid matching "foo.na" in "foo.na@example.com"
)
"""
# docstyle-ignore
# The components of the tokenizer:
REGEXPS = (
URLS,
# Phone numbers:
r"""
(?:
(?: # (international)
\+?[01]
[ *\-.\)]*
)?
(?: # (area code)
[\(]?
\d{3}
[ *\-.\)]*
)?
\d{3} # exchange
[ *\-.\)]*
\d{4} # base
)""",
# ASCII Emoticons
EMOTICONS,
# HTML tags:
r"""<[^>\s]+>""",
# ASCII Arrows
r"""[\-]+>|<[\-]+""",
# Twitter username:
r"""(?:@[\w_]+)""",
# Twitter hashtags:
r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
# email addresses
r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
# docstyle-ignore
# Remaining word types:
r"""
(?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
|
(?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
|
(?:[\w_]+) # Words without apostrophes or dashes.
|
(?:\.(?:\s*\.){1,}) # Ellipsis dots.
|
(?:\S) # Everything else that isn't whitespace.
""",
)
######################################################################
# This is the core tokenizing regex:
WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
# WORD_RE performs poorly on these patterns:
HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
# The emoticon string gets its own regex so that we can preserve case for
# them as needed:
EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
# These are for regularizing HTML entities to Unicode:
ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
######################################################################
# Functions for converting html entities
######################################################################
def _str_to_unicode(text, encoding=None, errors="strict"):
if encoding is None:
encoding = "utf-8"
if isinstance(text, bytes):
return text.decode(encoding, errors)
return text
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
"""
Remove entities from text by converting them to their corresponding unicode character.
Args:
text:
A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
keep (list):
List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
`&#hhhh;`) and named entities (such as ` ` or `>`).
remove_illegal (bool):
If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
kept "as is".
Returns: A unicode string with the entities removed.
See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
Examples:
```python
>>> from nltk.tokenize.casual import _replace_html_entities
>>> _replace_html_entities(b"Price: £100")
'Price: \\xa3100'
>>> print(_replace_html_entities(b"Price: £100"))
Price: £100
```"""
def _convert_entity(match):
entity_body = match.group(3)
if match.group(1):
try:
if match.group(2):
number = int(entity_body, 16)
else:
number = int(entity_body, 10)
# Numeric character references in the 80-9F range are typically
# interpreted by browsers as representing the characters mapped
# to bytes 80-9F in the Windows-1252 encoding. For more info
# see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
if 0x80 <= number <= 0x9F:
return bytes((number,)).decode("cp1252")
except ValueError:
number = None
else:
if entity_body in keep:
return match.group(0)
else:
number = html.entities.name2codepoint.get(entity_body)
if number is not None:
try:
return chr(number)
except (ValueError, OverflowError):
pass
return "" if remove_illegal else match.group(0)
return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
######################################################################
| BertweetTokenizer |
python | huggingface__transformers | src/transformers/models/vitmatte/modeling_vitmatte.py | {
"start": 2598,
"end": 3388
} | class ____(nn.Module):
"""
Basic convolution layers including: Conv3x3, BatchNorm2d, ReLU layers.
"""
def __init__(self, config, in_channels, out_channels, stride=2, padding=1):
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
bias=False,
)
self.batch_norm = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
self.relu = nn.ReLU()
def forward(self, hidden_state):
hidden_state = self.conv(hidden_state)
hidden_state = self.batch_norm(hidden_state)
hidden_state = self.relu(hidden_state)
return hidden_state
| VitMatteBasicConv3x3 |
python | Pylons__pyramid | tests/test_events.py | {
"start": 4845,
"end": 5622
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import BeforeTraversal
return BeforeTraversal
def _makeOne(self, request=None):
if request is None:
request = DummyRequest()
return self._getTargetClass()(request)
def test_class_conforms_to_IBeforeTraversal(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IBeforeTraversal
verifyClass(IBeforeTraversal, self._getTargetClass())
def test_instance_conforms_to_IBeforeTraversal(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IBeforeTraversal
verifyObject(IBeforeTraversal, self._makeOne())
| BeforeTraversalEventTests |
python | psf__requests | src/requests/exceptions.py | {
"start": 2375,
"end": 2566
} | class ____(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
| ConnectTimeout |
python | scikit-learn__scikit-learn | sklearn/model_selection/_split.py | {
"start": 23712,
"end": 31817
} | class ____(_BaseKFold):
"""Class-wise stratified K-Fold cross-validator.
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class in `y` in a binary or multiclass classification
setting.
Read more in the :ref:`User Guide <stratified_k_fold>`.
For visualisation of cross-validation behaviour and
comparison between common scikit-learn split methods
refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`
.. note::
Stratification on the class label solves an engineering problem rather
than a statistical one. See :ref:`stratification` for more details.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits()
2
>>> print(skf)
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(skf.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3]
Test: index=[0 2]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
Notes
-----
The implementation is designed to:
* Generate test sets such that all contain the same distribution of
classes, or as close as possible.
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Preserve order dependencies in the dataset ordering, when
``shuffle=False``: all samples from class k in some test set were
contiguous in y, or separated in y by samples from classes other than k.
* Generate test sets where the smallest and largest differ by at most one
sample.
.. versionchanged:: 0.22
The previous implementation did not follow the last constraint.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _make_test_folds(self, X, y=None):
rng = check_random_state(self.random_state)
# XXX: as of now, cross-validation splitters only operate in NumPy-land
# without attempting to leverage array API namespace features. However
# they might be fed by array API inputs, e.g. in CV-enabled estimators so
# we need the following explicit conversion:
xp, is_array_api = get_namespace(y)
if is_array_api:
y = _convert_to_numpy(y, xp)
else:
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ("binary", "multiclass")
if type_of_target_y not in allowed_target_types:
raise ValueError(
"Supported target types are: {}. Got {!r} instead.".format(
allowed_target_types, type_of_target_y
)
)
y = column_or_1d(y)
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
# y_inv encodes y according to lexicographic order. We invert y_idx to
# map the classes so that they are encoded by order of appearance:
# 0 represents the first label appearing in y, 1 the second, etc.
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError(
"n_splits=%d cannot be greater than the"
" number of members in each class." % (self.n_splits)
)
if self.n_splits > min_groups:
warnings.warn(
"The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (min_groups, self.n_splits),
UserWarning,
)
# Determine the optimal number of samples from each class in each fold,
# using round robin over the sorted y. (This can be done direct from
# counts, but that code is unreadable.)
y_order = np.sort(y_encoded)
allocation = np.asarray(
[
np.bincount(y_order[i :: self.n_splits], minlength=n_classes)
for i in range(self.n_splits)
]
)
# To maintain the data order dependencies as best as possible within
# the stratification constraint, we assign samples from each class in
# blocks (and then mess that up when shuffle=True).
test_folds = np.empty(len(y), dtype="i")
for k in range(n_classes):
# since the kth column of allocation stores the number of samples
# of class k in each test set, this generates blocks of fold
# indices corresponding to the allocation for class k.
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : array-like of shape (n_samples,), default=None
Always ignored, exists for API compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
if groups is not None:
warnings.warn(
f"The groups parameter is ignored by {self.__class__.__name__}",
UserWarning,
)
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
| StratifiedKFold |
python | doocs__leetcode | solution/0900-0999/0991.Broken Calculator/Solution.py | {
"start": 0,
"end": 307
} | class ____:
def brokenCalc(self, startValue: int, target: int) -> int:
ans = 0
while startValue < target:
if target & 1:
target += 1
else:
target >>= 1
ans += 1
ans += startValue - target
return ans
| Solution |
python | huggingface__transformers | tests/models/llava/test_configuration_llava.py | {
"start": 72,
"end": 2341
} | class ____(unittest.TestCase):
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_pixtral_reload(self):
"""
Simple test for reloading pixtral configs
"""
vision_config = {
"model_type": "pixtral",
"head_dim": 64,
"hidden_act": "silu",
"image_size": 1024,
"is_composition": True,
"patch_size": 16,
"rope_theta": 10000.0,
"tie_word_embeddings": False,
}
text_config = {
"model_type": "mistral",
"hidden_size": 5120,
"head_dim": 128,
"num_attention_heads": 32,
"intermediate_size": 14336,
"is_composition": True,
"max_position_embeddings": 1024000,
"num_hidden_layers": 40,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-05,
"rope_theta": 1000000000.0,
"sliding_window": None,
"vocab_size": 131072,
}
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(vision_config=vision_config, text_config=text_config)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_arbitrary_reload(self):
"""
Simple test for reloading arbitrarily composed subconfigs
"""
default_values = LlavaConfig().to_diff_dict()
default_values["vision_config"]["model_type"] = "pixtral"
default_values["text_config"]["model_type"] = "opt"
self.maxDiff = None
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(**default_values)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
self.assertDictEqual(config.to_dict(), reloaded.to_dict())
| LlavaConfigTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_simple04.py | {
"start": 358,
"end": 4214
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("simple04.xlsx")
def test_create_file(self):
"""Test dates and times."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = datetime.strptime("12:00", "%H:%M")
date2 = datetime.strptime("2013-01-27", "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1, format1)
worksheet.write_datetime(1, 0, date2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_write(self):
"""Test dates and times with write() method."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = datetime.strptime("12:00", "%H:%M")
date2 = datetime.strptime("2013-01-27", "%Y-%m-%d")
worksheet.write(0, 0, date1, format1)
worksheet.write(1, 0, date2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_A1(self):
"""Test dates and times in A1 notation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("A:A", 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = datetime.strptime("12:00", "%H:%M")
date2 = datetime.strptime("2013-01-27", "%Y-%m-%d")
worksheet.write_datetime("A1", date1, format1)
worksheet.write_datetime("A2", date2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_date_and_time1(self):
"""Test dates and times with datetime .date and .time."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("A:A", 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = time(12)
date2 = date(2013, 1, 27)
worksheet.write_datetime("A1", date1, format1)
worksheet.write_datetime("A2", date2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_date_and_time2(self):
"""Test dates and times with datetime .date and .time. and write()"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("A:A", 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = time(12)
date2 = date(2013, 1, 27)
worksheet.write("A1", date1, format1)
worksheet.write("A2", date2, format2)
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test dates and times."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
date1 = datetime.strptime("12:00", "%H:%M")
date2 = datetime.strptime("2013-01-27", "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1, format1)
worksheet.write_datetime(1, 0, date2, format2)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 9284,
"end": 9583
} | class ____(IntegerFieldTests):
model = PositiveSmallIntegerModel
documented_range = (0, 32767)
rel_db_type_class = (
models.PositiveSmallIntegerField
if connection.features.related_fields_match_type
else models.SmallIntegerField
)
| PositiveSmallIntegerFieldTests |
python | pytorch__pytorch | test/inductor/test_mix_order_reduction.py | {
"start": 699,
"end": 2124
} | class ____(TestBase):
"""
Illustate the cases that we skip mix-order reduction. We skip in cases
like when the outer reduction is followed by a pointwise that load
the un-reduced tensor.
"""
@inductor_config.patch(split_reductions=False)
def test_dimension_too_close(self):
"""
Skip if the two reduction size are too close.
We require one reduction dimension to be much larger so we can split
that dimension and make it efficient.
"""
def f(x):
out1 = x.sum(dim=1)
out2 = x.sum(dim=0)
return out1, out2
x = torch.randn(768, 768, device=GPU_TYPE)
torch.compile(f)(x)
self.assertEqual(2, metrics.generated_kernel_count)
@inductor_config.patch(split_reductions=False)
def test_skip_if_outer_reduction_followed_by_full_pointwise(self):
"""
Skip for now if the outer reduction is followed by a pointwise node
accessing the original tensor. Accessing the reduced tensor is fine
(e.g. to support torch.mean).
"""
def f(x):
out1 = x.sum(dim=1)
out2 = x.sum(dim=0, keepdim=True) + x
return out1, out2
x = torch.randn(32768, 768, device=GPU_TYPE)
self.check_numeric(f, (x,))
self.assertEqual(0, metrics.codegen_mix_order_reduction)
@instantiate_parametrized_tests
| SkipPatternTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zenloop/source_zenloop/streams.py | {
"start": 1872,
"end": 3062
} | class ____:
parent_stream_class: Optional[ZenloopStream] = None
def stream_slices(self, sync_mode, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
# determine if parent_stream_class is Surveys or SurveyGroups
if self.parent_stream_class.__name__ == "Surveys":
public_hash_id = self.survey_id
else:
public_hash_id = self.survey_group_id
# loop through all survey_id's if None was provided
# return nothing otherwise
if not public_hash_id:
for item in self.parent_stream_class(
api_token=self.api_token, date_from=self.date_from, survey_id=self.survey_id, survey_group_id=self.survey_group_id
).read_records(sync_mode=sync_mode):
# set date_from to most current cursor_field or date_from if not incremental
if stream_state:
date_from = stream_state[self.cursor_field]
else:
date_from = self.date_from
yield {"survey_slice": item["public_hash_id"], "date_from": date_from}
else:
yield None
| ChildStreamMixin |
python | langchain-ai__langchain | libs/core/langchain_core/utils/iter.py | {
"start": 2955,
"end": 7300
} | class ____(Generic[T]):
"""Create `n` separate asynchronous iterators over `iterable`.
This splits a single `iterable` into multiple iterators, each providing
the same items in the same order.
All child iterators may advance separately but share the same items
from `iterable` -- when the most advanced iterator retrieves an item,
it is buffered until the least advanced iterator has yielded it as well.
A `tee` works lazily and can handle an infinite `iterable`, provided
that all iterators advance.
```python
async def derivative(sensor_data):
previous, current = a.tee(sensor_data, n=2)
await a.anext(previous) # advance one iterator
return a.map(operator.sub, previous, current)
```
Unlike `itertools.tee`, `.tee` returns a custom type instead
of a :py`tuple`. Like a tuple, it can be indexed, iterated and unpacked
to get the child iterators. In addition, its `.tee.aclose` method
immediately closes all children, and it can be used in an `async with` context
for the same effect.
If `iterable` is an iterator and read elsewhere, `tee` will *not*
provide these items. Also, `tee` must internally buffer each item until the
last iterator has yielded it; if the most and least advanced iterator differ
by most data, using a :py`list` is more efficient (but not lazy).
If the underlying iterable is concurrency safe (`anext` may be awaited
concurrently) the resulting iterators are concurrency safe as well. Otherwise,
the iterators are safe if there is only ever one single "most advanced" iterator.
To enforce sequential use of `anext`, provide a `lock`
- e.g. an :py`asyncio.Lock` instance in an :py:mod:`asyncio` application -
and access is automatically synchronised.
"""
def __init__(
self,
iterable: Iterator[T],
n: int = 2,
*,
lock: AbstractContextManager[Any] | None = None,
):
"""Create a `tee`.
Args:
iterable: The iterable to split.
n: The number of iterators to create.
lock: The lock to synchronise access to the shared buffers.
"""
self._iterator = iter(iterable)
self._buffers: list[deque[T]] = [deque() for _ in range(n)]
self._children = tuple(
tee_peer(
iterator=self._iterator,
buffer=buffer,
peers=self._buffers,
lock=lock if lock is not None else NoLock(),
)
for buffer in self._buffers
)
def __len__(self) -> int:
"""Return the number of child iterators."""
return len(self._children)
@overload
def __getitem__(self, item: int) -> Iterator[T]: ...
@overload
def __getitem__(self, item: slice) -> tuple[Iterator[T], ...]: ...
def __getitem__(self, item: int | slice) -> Iterator[T] | tuple[Iterator[T], ...]:
"""Return the child iterator(s) at the given index or slice."""
return self._children[item]
def __iter__(self) -> Iterator[Iterator[T]]:
"""Return an iterator over the child iterators.
Yields:
The child iterators.
"""
yield from self._children
def __enter__(self) -> "Tee[T]":
"""Return Tee instance."""
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> Literal[False]:
"""Close all child iterators.
Returns:
False (exception not suppressed).
"""
self.close()
return False
def close(self) -> None:
"""Close all child iterators."""
for child in self._children:
child.close()
# Why this is needed https://stackoverflow.com/a/44638570
safetee = Tee
def batch_iterate(size: int | None, iterable: Iterable[T]) -> Iterator[list[T]]:
    """Lazily split *iterable* into lists of at most *size* items.

    Args:
        size: The size of the batch. If `None`, returns a single batch.
        iterable: The iterable to batch.

    Yields:
        The batches of the iterable.
    """
    iterator = iter(iterable)
    # islice drains up to `size` items per pass; an empty chunk means the
    # iterator is exhausted and the loop ends.
    while chunk := list(islice(iterator, size)):
        yield chunk
| Tee |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-make-array-equal.py | {
"start": 902,
"end": 1627
} | class ____(object):
def minCost(self, nums, cost):
"""
:type nums: List[int]
:type cost: List[int]
:rtype: int
"""
def f(x):
return sum(abs(y-x)*c for y, c in itertools.izip(nums, cost))
def check(x):
return x+1 == len(idxs) or f(nums[idxs[x]]) < f(nums[idxs[x+1]])
idxs = range(len(nums))
idxs.sort(key=lambda x: nums[x])
left, right = 0, len(idxs)-1
while left <= right:
mid = left+(right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return f(nums[idxs[left]])
# Time: O(nlogn)
# Space: O(n)
# prefix sum
| Solution2 |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-click/tests/test_formatter.py | {
"start": 151,
"end": 14857
} | class ____(unittest.TestCase):
"""Validate basic ``click.Command`` instances."""
maxDiff = None
def test_no_parameters(self):
"""Validate a `click.Command` with no parameters.
This exercises the code paths for a command with *no* arguments, *no*
options and *no* environment variables.
"""
@click.command()
def foobar():
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS]
"""
).lstrip(),
"\n".join(output),
)
def test_basic_parameters(self):
"""Validate a combination of parameters.
This exercises the code paths for a command with arguments, options and
environment variables.
"""
@click.command()
@click.option("--param", envvar="PARAM", help="A sample option")
@click.option("--another", metavar="[FOO]", help="Another option")
@click.option(
"--choice",
help="A sample option with choices",
type=click.Choice(["Option1", "Option2"]),
)
@click.option(
"--numeric-choice",
metavar="<choice>",
help="A sample option with numeric choices",
type=click.Choice([1, 2, 3]),
)
@click.option(
"--flag",
is_flag=True,
help="A boolean flag",
)
@click.argument("ARG", envvar="ARG")
def foobar(bar):
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS] ARG
.. rubric:: Options
.. option:: --param <param>
A sample option
.. option:: --another <FOO>
Another option
.. option:: --choice <choice>
A sample option with choices
:options: Option1 | Option2
.. option:: --numeric-choice <choice>
A sample option with numeric choices
:options: 1 | 2 | 3
.. option:: --flag
A boolean flag
.. rubric:: Arguments
.. option:: ARG
Required argument
.. rubric:: Environment variables
.. _foobar-param-PARAM:
.. envvar:: PARAM
:noindex:
Provide a default for :option:`--param`
.. _foobar-arg-ARG:
.. envvar:: ARG
:noindex:
Provide a default for :option:`ARG`
"""
).lstrip(),
"\n".join(output),
)
def test_help_epilog(self):
"""Validate formatting of explicit help and epilog strings."""
@click.command(help="A sample command.", epilog="A sample epilog.")
@click.option("--param", help="A sample option")
def foobar(bar):
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS]
.. rubric:: Options
.. option:: --param <param>
A sample option
A sample epilog.
"""
).lstrip(),
"\n".join(output),
)
def test_help_argument(self):
"""Validate a help text for arguments.
While click only provides the help attribute for options, but not for arguments,
it allows customization with subclasses.
"""
class CustomArgument(click.Argument):
def __init__(self, *args, help=None, **kwargs): # noqa
super().__init__(*args, **kwargs)
self.help = help
@click.command()
@click.option("--option", help="A sample option")
@click.argument("ARG", help="A sample argument", cls=CustomArgument)
@click.argument("ARG_NO_HELP", cls=CustomArgument)
def foobar(bar):
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS] ARG ARG_NO_HELP
.. rubric:: Options
.. option:: --option <option>
A sample option
.. rubric:: Arguments
.. option:: ARG
Required argument
A sample argument
.. option:: ARG_NO_HELP
Required argument
"""
).lstrip(),
"\n".join(output),
)
def test_defaults(self):
"""Validate formatting of user documented defaults."""
@click.command()
@click.option("--num-param", type=int, default=42, show_default=True)
@click.option(
"--param",
default=lambda: None,
show_default="Something computed at runtime",
)
@click.option(
"--group",
default=[("foo", "bar")],
nargs=2,
type=click.Tuple([str, str]),
multiple=True,
show_default=True,
)
@click.option(
"--only-show-default",
show_default="Some default computed at runtime!",
)
@click.option("--string-default", default="abc", show_default=True)
@click.option("--empty-string-default", default="", show_default=True)
def foobar(bar):
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS]
.. rubric:: Options
.. option:: --num-param <num_param>
:default: ``42``
.. option:: --param <param>
:default: ``'Something computed at runtime'``
.. option:: --group <group>
:default: ``('foo', 'bar')``
.. option:: --only-show-default <only_show_default>
:default: ``'Some default computed at runtime!'``
.. option:: --string-default <string_default>
:default: ``'abc'``
.. option:: --empty-string-default <empty_string_default>
:default: ``''``
"""
).lstrip(),
"\n".join(output),
)
def test_show_default(self):
"""Validate formatting of show_default via context_settings."""
@click.command(context_settings={"show_default": True})
@click.option("--no-set", default=0)
@click.option("--set-false", default=0, show_default=False)
def foobar():
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar", show_default=True)
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS]
.. rubric:: Options
.. option:: --no-set <no_set>
:default: ``0``
.. option:: --set-false <set_false>
"""
).lstrip(),
"\n".join(output),
)
def test_hidden(self):
"""Validate a `click.Command` with the `hidden` flag."""
@click.command(hidden=True)
def foobar():
"""A sample command."""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual("", "\n".join(output))
def test_titles(self):
"""Validate a `click.Command` with nested titles."""
@click.command()
@click.option("--name", help="Name to say hello to.", required=True, type=str)
def hello(name):
"""Prints hello to name given.
Examples:
--------
.. code:: bash
my_cli hello --name "Jack"
"""
ctx = click.Context(hello, info_name="hello")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
Prints hello to name given.
Examples
--------
.. code:: bash
my_cli hello --name "Jack"
.. program:: hello
.. code-block:: shell
hello [OPTIONS]
.. rubric:: Options
.. option:: --name <name>
**Required** Name to say hello to.
"""
).lstrip(),
"\n".join(output),
)
def test_ansi_escape_sequences(self):
"""Validate that ANSI escape sequences are stripped."""
@click.command(epilog="\033[31mA sample epilog.\033[0m")
@click.option(
"--name",
help="Name to say \033[94mhello\033[0m to.",
required=True,
type=str,
)
@click.option(
"--choice",
help="A sample option with choices",
type=click.Choice(["\033[94mOption1\033[0m", "\033[94mOption2\033[0m"]),
)
@click.option(
"--param",
default=lambda: None,
show_default="Something computed at \033[94mruntime\033[0m",
)
def foobar():
r"""A sample command with **sparkles**.
We've got \033[31mred text\033[0m, \033[104mblue backgrounds\033[0m, a
dash of \033[1mbold\033[0m and even some \033[4munderlined words\033[0m.
"""
pass
ctx = click.Context(foobar, info_name="foobar")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command with **sparkles**.
We've got red text, blue backgrounds, a
dash of bold and even some underlined words.
.. program:: foobar
.. code-block:: shell
foobar [OPTIONS]
.. rubric:: Options
.. option:: --name <name>
**Required** Name to say hello to.
.. option:: --choice <choice>
A sample option with choices
:options: Option1 | Option2
.. option:: --param <param>
:default: ``'Something computed at runtime'``
A sample epilog.
"""
).lstrip(),
"\n".join(output),
)
@unittest.skipIf(CLICK_VERSION < (8, 1), "Click < 8.1.0 stores the modified help string")
def test_no_truncation(self):
r"""Validate behavior when a \f character is present.
https://click.palletsprojects.com/en/8.1.x/documentation/#truncating-help-texts
"""
@click.command()
def cli():
r"""First paragraph.
This is a very long second
paragraph and not correctly
wrapped but it will be rewrapped.
\f
:param click.core.Context ctx: Click context.
"""
pass
ctx = click.Context(cli, info_name="cli")
output = list(ext._format_command(ctx, nested="short")) # noqa
# note that we have an extra newline because we're using
# docutils.statemachine.string2lines under the hood, which is
# converting the form feed to a newline
self.assertEqual(
textwrap.dedent(
"""
First paragraph.
This is a very long second
paragraph and not correctly
wrapped but it will be rewrapped.
:param click.core.Context ctx: Click context.
.. program:: cli
.. code-block:: shell
cli [OPTIONS]
"""
).lstrip(),
"\n".join(output),
)
def test_no_line_wrapping(self):
r"""Validate behavior when a \b character is present.
https://click.palletsprojects.com/en/8.1.x/documentation/#preventing-rewrapping
"""
@click.command(
epilog="""
An epilog containing pre-wrapped text.
\b
This is
a paragraph
without rewrapping.
And this is a paragraph
that will be rewrapped again.
"""
)
@click.option(
"--param",
help="""An option containing pre-wrapped text.
\b
This is
a paragraph
without rewrapping.
And this is a paragraph
that will be rewrapped again.
""",
)
def cli():
r"""A command containing pre-wrapped text.
\b
This is
a paragraph
without rewrapping.
And this is a paragraph
that will be rewrapped again.
"""
pass
ctx = click.Context(cli, info_name="cli")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A command containing pre-wrapped text.
| This is
| a paragraph
| without rewrapping.
And this is a paragraph
that will be rewrapped again.
.. program:: cli
.. code-block:: shell
cli [OPTIONS]
.. rubric:: Options
.. option:: --param <param>
An option containing pre-wrapped text.
| This is
| a paragraph
| without rewrapping.
And this is a paragraph
that will be rewrapped again.
An epilog containing pre-wrapped text.
| This is
| a paragraph
| without rewrapping.
And this is a paragraph
that will be rewrapped again.
"""
).lstrip(),
"\n".join(output),
)
| CommandTestCase |
python | numba__numba | numba/core/types/functions.py | {
"start": 25215,
"end": 26002
} | class ____(Callable, DTypeSpec, Opaque):
"""
Type class for number classes (e.g. "np.float64").
"""
def __init__(self, instance_type):
self.instance_type = instance_type
name = "class(%s)" % (instance_type,)
super(NumberClass, self).__init__(name)
def get_call_type(self, context, args, kws):
# Overridden by the __call__ constructor resolution in typing.builtins
return None
def get_call_signatures(self):
return (), True
def get_impl_key(self, sig):
return type(self)
@property
def key(self):
return self.instance_type
@property
def dtype(self):
return self.instance_type
_RecursiveCallOverloads = namedtuple("_RecursiveCallOverloads", "qualname,uid")
| NumberClass |
python | jd__tenacity | tenacity/retry.py | {
"start": 746,
"end": 1398
} | class ____(abc.ABC):
"""Abstract base class for retry strategies."""
@abc.abstractmethod
def __call__(self, retry_state: "RetryCallState") -> bool:
pass
def __and__(self, other: "retry_base") -> "retry_all":
return other.__rand__(self)
def __rand__(self, other: "retry_base") -> "retry_all":
return retry_all(other, self)
def __or__(self, other: "retry_base") -> "retry_any":
return other.__ror__(self)
def __ror__(self, other: "retry_base") -> "retry_any":
return retry_any(other, self)
RetryBaseT = typing.Union[retry_base, typing.Callable[["RetryCallState"], bool]]
| retry_base |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 20139,
"end": 22177
} | class ____(BaseTest):
"""
Test DLL dependencies are within a certain expected set.
"""
@unittest.skipUnless(sys.platform.startswith('linux'),
"Linux-specific test")
@unittest.skipUnless(os.environ.get('LLVMLITE_DIST_TEST'),
"Distribution-specific test")
def test_linux(self):
lib_path = ffi.lib._name
env = os.environ.copy()
env['LANG'] = 'C'
p = subprocess.Popen(["objdump", "-p", lib_path],
stdout=subprocess.PIPE, env=env)
out, _ = p.communicate()
self.assertEqual(0, p.returncode)
# Parse library dependencies
lib_pat = re.compile(r'^([+-_a-zA-Z0-9]+)\.so(?:\.\d+){0,3}$')
deps = set()
for line in out.decode().splitlines():
parts = line.split()
if parts and parts[0] == 'NEEDED':
dep = parts[1]
m = lib_pat.match(dep)
if len(parts) != 2 or not m:
self.fail("invalid NEEDED line: %r" % (line,))
deps.add(m.group(1))
# Sanity check that our dependencies were parsed ok
if 'libc' not in deps or 'libpthread' not in deps:
self.fail("failed parsing dependencies? got %r" % (deps,))
# Ensure all dependencies are expected
allowed = set(['librt', 'libdl', 'libpthread', 'libz', 'libm',
'libgcc_s', 'libc', 'ld-linux', 'ld64', 'libzstd',
'libstdc++'])
if platform.python_implementation() == 'PyPy':
allowed.add('libtinfo')
fails = []
for dep in deps:
if not dep.startswith('ld-linux-') and dep not in allowed:
fails.append(dep)
if len(fails) == 1:
self.fail("unexpected dependency %r in %r" % (fails[0], deps))
elif len(fails) > 1:
self.fail("unexpected dependencies %r in %r" % (fails, deps))
else:
pass # test passes
| TestDependencies |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/module_loaders/object_list.py | {
"start": 9463,
"end": 18168
} | class ____:
    def __init__(
        self,
        loaded_objects: Sequence[LoadableDagsterDef],
    ):
        # Raw, ordered list of every definition the module loader produced.
        self.loaded_defs = loaded_objects

    @cached_property
    def assets_defs_and_specs(self) -> Sequence[Union[AssetsDefinition, AssetSpec]]:
        # AssetsDefinition objects without keys (pure check definitions)
        # are deliberately excluded here.
        return [
            asset
            for asset in self.loaded_defs
            if (isinstance(asset, AssetsDefinition) and asset.keys) or isinstance(asset, AssetSpec)
        ]

    @cached_property
    def assets_defs(self) -> Sequence[AssetsDefinition]:
        # All AssetsDefinition objects, including check-only ones.
        return [asset for asset in self.loaded_defs if isinstance(asset, AssetsDefinition)]

    @cached_property
    def checks_defs(self) -> Sequence[AssetChecksDefinition]:
        # AssetsDefinition objects that contain only asset checks, cast to
        # the narrower AssetChecksDefinition type.
        return [
            cast("AssetChecksDefinition", asset)
            for asset in self.loaded_defs
            if isinstance(asset, AssetsDefinition) and has_only_asset_checks(asset)
        ]

    @cached_property
    def assets_defs_specs_and_checks_defs(
        self,
    ) -> Sequence[Union[AssetsDefinition, AssetSpec, AssetChecksDefinition]]:
        return [*self.assets_defs_and_specs, *self.checks_defs]

    @cached_property
    def source_assets(self) -> Sequence[Union[SourceAsset, AssetSpec]]:
        return [
            dagster_def
            for dagster_def in self.loaded_defs
            if isinstance(dagster_def, (SourceAsset, AssetSpec))
        ]

    @cached_property
    def cacheable_assets(self) -> Sequence[CacheableAssetsDefinition]:
        return [
            dagster_def
            for dagster_def in self.loaded_defs
            if isinstance(dagster_def, CacheableAssetsDefinition)
        ]

    @cached_property
    def sensors(self) -> Sequence[SensorDefinition]:
        return [
            dagster_def
            for dagster_def in self.loaded_defs
            if isinstance(dagster_def, SensorDefinition)
        ]

    @cached_property
    def schedules(
        self,
    ) -> Sequence[Union[ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition]]:
        return [
            dagster_def
            for dagster_def in self.loaded_defs
            if isinstance(
                dagster_def, (ScheduleDefinition, UnresolvedPartitionedAssetScheduleDefinition)
            )
        ]

    @cached_property
    def jobs(self) -> Sequence[Union[JobDefinition, UnresolvedAssetJobDefinition]]:
        return [
            dagster_def
            for dagster_def in self.loaded_defs
            if isinstance(dagster_def, (JobDefinition, UnresolvedAssetJobDefinition))
        ]

    @cached_property
    def assets(
        self,
    ) -> Sequence[Union[AssetsDefinition, SourceAsset, CacheableAssetsDefinition, AssetSpec]]:
        # Everything asset-like, in accessor order (defs/specs, sources,
        # cacheables).
        return [
            *self.assets_defs_and_specs,
            *self.source_assets,
            *self.cacheable_assets,
        ]

    def get_objects(
        self, filter_fn: Callable[[LoadableDagsterDef], bool]
    ) -> Sequence[LoadableDagsterDef]:
        """Return the loaded definitions matching *filter_fn*, in load order."""
        return [dagster_def for dagster_def in self.loaded_defs if filter_fn(dagster_def)]

    def assets_with_loadable_prefix(
        self, key_prefix: CoercibleToAssetKeyPrefix
    ) -> "DagsterObjectsList":
        """Return a new list with *key_prefix* applied to loadable asset keys."""
        # There is a tricky edge case here where if a non-cacheable asset depends on a cacheable asset,
        # and the assets are prefixed, the non-cacheable asset's dependency will not be prefixed since
        # at prefix-time it is not known that its dependency is one of the cacheable assets.
        # https://github.com/dagster-io/dagster/pull/10389#pullrequestreview-1170913271
        result_list = []
        all_asset_keys = {
            key
            for asset_object in self.assets_defs_specs_and_checks_defs
            for key in key_iterator(asset_object, included_targeted_keys=True)
        }
        # Map every known key to its prefixed form so dependencies are
        # rewritten consistently across definitions.
        key_replacements = {key: key.with_prefix(key_prefix) for key in all_asset_keys}
        for dagster_def in self.loaded_defs:
            if isinstance(dagster_def, CacheableAssetsDefinition):
                result_list.append(dagster_def.with_prefix_for_all(key_prefix))
            elif isinstance(dagster_def, AssetsDefinition):
                result_list.append(replace_keys_in_asset(dagster_def, key_replacements))
            else:
                # We don't replace the key for SourceAssets or AssetSpec objects (which can be thought of as the SourceAsset, or of course for non-asset objects.
                result_list.append(dagster_def)
        return DagsterObjectsList(result_list)

    def assets_with_source_prefix(
        self, key_prefix: CoercibleToAssetKeyPrefix
    ) -> "DagsterObjectsList":
        """Return a new list with *key_prefix* applied to source-asset keys."""
        result_list = []
        key_replacements = {
            source_asset.key: source_asset.key.with_prefix(key_prefix)
            for source_asset in self.source_assets
        }
        for dagster_def in self.loaded_defs:
            if isinstance(
                dagster_def,
                (AssetSpec, SourceAsset, AssetsDefinition),
            ):
                result_list.append(replace_keys_in_asset(dagster_def, key_replacements))
            else:
                result_list.append(dagster_def)
        return DagsterObjectsList(result_list)

    def with_attributes(
        self,
        key_prefix: Optional[CoercibleToAssetKeyPrefix],
        source_key_prefix: Optional[CoercibleToAssetKeyPrefix],
        group_name: Optional[str],
        legacy_freshness_policy: Optional[LegacyFreshnessPolicy],
        automation_condition: Optional[AutomationCondition],
        backfill_policy: Optional[BackfillPolicy],
    ) -> "DagsterObjectsList":
        """Return a new list with prefixes and per-asset attributes applied.

        Prefixing happens first (loadable keys, then source keys); attribute
        overrides are then applied per definition type.
        """
        dagster_def_list = self.assets_with_loadable_prefix(key_prefix) if key_prefix else self
        dagster_def_list = (
            dagster_def_list.assets_with_source_prefix(source_key_prefix)
            if source_key_prefix
            else dagster_def_list
        )
        return_list = []
        for dagster_def in dagster_def_list.loaded_defs:
            if isinstance(dagster_def, AssetsDefinition):
                new_asset = dagster_def.map_asset_specs(
                    _spec_mapper_disallow_group_override(group_name, automation_condition)
                ).with_attributes(
                    backfill_policy=backfill_policy, legacy_freshness_policy=legacy_freshness_policy
                )
                # Re-narrow to AssetChecksDefinition when the result holds
                # only checks, preserving the original classification.
                return_list.append(
                    new_asset.coerce_to_checks_def()
                    if has_only_asset_checks(new_asset)
                    else new_asset
                )
            elif isinstance(dagster_def, SourceAsset):
                return_list.append(dagster_def.with_attributes(group_name=group_name))
            elif isinstance(dagster_def, AssetSpec):
                return_list.append(
                    _spec_mapper_disallow_group_override(group_name, automation_condition)(
                        dagster_def
                    )
                )
            elif isinstance(dagster_def, CacheableAssetsDefinition):
                return_list.append(
                    dagster_def.with_attributes_for_all(
                        group_name,
                        legacy_freshness_policy=legacy_freshness_policy,
                        auto_materialize_policy=automation_condition.as_auto_materialize_policy()
                        if automation_condition
                        else None,
                        backfill_policy=backfill_policy,
                    )
                )
            else:
                return_list.append(dagster_def)
        return DagsterObjectsList(return_list)

    def to_definitions_args(self) -> Mapping[str, Any]:
        """Return keyword arguments suitable for constructing a Definitions object."""
        return {
            "assets": self.assets,
            "asset_checks": self.checks_defs,
            "sensors": self.sensors,
            "schedules": self.schedules,
            "jobs": self.jobs,
        }
def _spec_mapper_disallow_group_override(
    group_name: Optional[str], automation_condition: Optional[AutomationCondition]
) -> Callable[[AssetSpec], AssetSpec]:
    """Build a spec mapper that applies *group_name* / *automation_condition*.

    The mapper raises if a spec already has an explicit, different group name
    (the default group name may be overridden freely).
    """

    def _inner(spec: AssetSpec) -> AssetSpec:
        if (
            group_name is not None
            and spec.group_name is not None
            and group_name != spec.group_name
            and spec.group_name != DEFAULT_GROUP_NAME
        ):
            raise DagsterInvalidDefinitionError(
                f"Asset spec {spec.key.to_user_string()} has group name {spec.group_name}, which conflicts with the group name {group_name} provided in load_assets_from_modules."
            )
        # `...` (Ellipsis) means "leave the attribute unchanged".
        return spec.replace_attributes(
            group_name=group_name if group_name else ...,
            automation_condition=automation_condition if automation_condition else ...,
        )

    return _inner
| DagsterObjectsList |
python | doocs__leetcode | solution/3400-3499/3487.Maximum Unique Subarray Sum After Deletion/Solution.py | {
"start": 0,
"end": 302
} | class ____:
def maxSum(self, nums: List[int]) -> int:
mx = max(nums)
if mx <= 0:
return mx
ans = 0
s = set()
for x in nums:
if x < 0 or x in s:
continue
ans += x
s.add(x)
return ans
| Solution |
python | jazzband__django-pipeline | pipeline/templatetags/pipeline.py | {
"start": 552,
"end": 4362
} | class ____:
    # Shared behavior for the pipeline template tags: package lookup and
    # rendering of compressed output vs. individual source files.
    request = None
    _request_var = None

    @property
    def request_var(self):
        # Lazily create (and cache) the template variable used to resolve
        # `request` from the rendering context.
        if not self._request_var:
            self._request_var = template.Variable("request")
        return self._request_var

    def package_for(self, package_name, package_type):
        # Look up the named package in the settings section matching its type.
        package = {
            "js": getattr(settings, "JAVASCRIPT", {}).get(package_name, {}),
            "css": getattr(settings, "STYLESHEETS", {}).get(package_name, {}),
        }[package_type]

        if package:
            package = {package_name: package}

        # Build a Packager that only knows about the requested package type.
        packager = {
            "js": Packager(css_packages={}, js_packages=package),
            "css": Packager(css_packages=package, js_packages={}),
        }[package_type]

        return packager.package_for(package_type, package_name)

    def render(self, context):
        # Best effort: templates rendered without `request` in the context
        # simply leave self.request as None.
        try:
            self.request = self.request_var.resolve(context)
        except VariableDoesNotExist:
            pass

    def render_compressed(self, package, package_name, package_type):
        """Render HTML for the package.

        If ``PIPELINE_ENABLED`` is ``True``, this will render the package's
        output file (using :py:meth:`render_compressed_output`). Otherwise,
        this will render the package's source files (using
        :py:meth:`render_compressed_sources`).

        Subclasses can override this method to provide custom behavior for
        determining what to render.
        """
        if settings.PIPELINE_ENABLED:
            return self.render_compressed_output(package, package_name, package_type)
        else:
            return self.render_compressed_sources(package, package_name, package_type)

    def render_compressed_output(self, package, package_name, package_type):
        """Render HTML for using the package's output file.

        Subclasses can override this method to provide custom behavior for
        rendering the output file.
        """
        # Dispatch to render_js / render_css depending on the package type.
        method = getattr(self, f"render_{package_type}")

        return method(package, package.output_filename)

    def render_compressed_sources(self, package, package_name, package_type):
        """Render HTML for using the package's list of source files.

        Each source file will first be collected, if
        ``PIPELINE_COLLECTOR_ENABLED`` is ``True``.

        If there are any errors compiling any of the source files, an
        ``SHOW_ERRORS_INLINE`` is ``True``, those errors will be shown at
        the top of the page.

        Subclasses can override this method to provide custom behavior for
        rendering the source files.
        """
        if settings.PIPELINE_COLLECTOR_ENABLED:
            default_collector.collect(self.request)

        packager = Packager()
        method = getattr(self, f"render_individual_{package_type}")

        try:
            paths = packager.compile(package.paths)
        except CompilerError as e:
            # Optionally surface compiler failures in the rendered page
            # instead of raising.
            if settings.SHOW_ERRORS_INLINE:
                method = getattr(self, f"render_error_{package_type}")

                return method(package_name, e)
            else:
                raise

        templates = packager.pack_templates(package)

        return method(package, paths, templates=templates)

    def render_error(self, package_type, package_name, e):
        # Remove any ANSI escape sequences in the output.
        error_output = re.sub(
            re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"),
            "",
            e.error_output,
        )

        return render_to_string(
            "pipeline/compile_error.html",
            {
                "package_type": package_type,
                "package_name": package_name,
                "command": subprocess.list2cmdline(e.command),
                "errors": error_output,
            },
        )
| PipelineMixin |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_general_blocks.py | {
"start": 8273,
"end": 9402
} | class ____(util.MdCase):
"""Test some code related cases."""
extension = ['pymdownx.blocks.html', 'pymdownx.superfences']
def test_superfence_block(self):
"""Test blocks with fenced code content."""
self.check_markdown(
R'''
/// html | div
```python
import foo
```
///
''',
'''
<div>
<div class="highlight"><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">foo</span>
</code></pre></div>
</div>
''', # noqa: E501
True
)
def test_superfence_inline(self):
"""Test blocks with fenced code content."""
self.check_markdown(
R'''
/// html | span
```python
import foo
```
Other content
///
''',
'''
<span><code>python
import foo</code>
Other content</span>
''',
True
)
| TestCodeHandling |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image29.py | {
"start": 315,
"end": 903
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image29.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
0, 10, self.image_dir + "red_208.png", {"x_offset": -210, "y_offset": 1}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | wandb__wandb | wandb/sdk/wandb_run.py | {
"start": 3469,
"end": 3528
} | class ____(IntEnum):
EARLY = 1
LATE = 2
| TeardownStage |
python | skorch-dev__skorch | skorch/history.py | {
"start": 9587,
"end": 18268
} | class ____:
"""History for use in training using multiple processes
When using skorch with :class:`.AccelerateMixin` for multi GPU training, use
this class instead of the default :class:`.History` class.
When using PyTorch :class:`torch.nn.parallel.DistributedDataParallel`, the
whole training process is forked and batches are processed in parallel. That
means that the standard :class:`.History` does not see all the batches that
are being processed, which results in the different processes having
histories that are out of sync. This is bad because the history is used as a
reference to influence the training, e.g. to control early stopping.
This class solves the problem by using a distributed store from PyTorch,
e.g. :class:`torch.distributed.TCPStore`, to synchronize the batch
information across processes. This ensures that the information stored in
the individual history copies is identical for ``history[:, 'batches']``.
When it comes to the epoch-level information, it can still diverge between
processes (e.g. the recorded duration of the epoch).
To use this class, instantiate it and pass it as the ``history`` argument to
the net.
Click here for more information on `PyTorch distributed key-value stores
<https://pytorch.org/docs/stable/distributed.html#distributed-key-value-store>`_.
Notes
-----
If using this class results in the processes hanging or timing out, double
check that the ``rank`` and ``world_size`` arguments are set correctly.
Otherwise, the history instances will be waiting for records that are
actually never written.
If the speed of the processes is very uneven, there can also be timeouts. To
increase the waiting time, pass a corresponding ``timeout`` argument to the
``store`` instance.
Objects stored with this history make a json roundtrip. Therefore, if you
store objects that don't survive a json roundtrip (say, numpy arrays), don't
use this class.
The PyTorch ``Store`` classes cannot be pickled. Therefore, if a net using
this history class is pickled, the ``store`` attribute is discarded so that
the pickling does not fail. This means, however, that an unpickled net
cannot be used for further training without manually setting the ``store``
attribute on the ``history``.
Examples
--------
>>> # general
>>> from skorch import NeuralNetClassifier
>>> from torch.distributed import TCPStore
>>> from torch.nn.parallel import DistributedDataParallel
>>> def train(rank, world_size, is_master):
... store = TCPStore(
... "127.0.0.1", port=1234, world_size=world_size)
... dist_history = DistributedHistory(
... store=store, rank=rank, world_size=world_size)
... net = NeuralNetClassifier(..., history=dist_history)
... net.fit(X, y)
>>> # with accelerate
>>> from accelerate import Accelerator
>>> from skorch.hf import AccelerateMixin
>>> accelerator = Accelerator(...)
>>> def train(accelerator):
... is_master = accelerator.is_main_process
... world_size = accelerator.num_processes
... rank = accelerator.local_process_index
... store = TCPStore(
... "127.0.0.1", port=1234, world_size=world_size, is_master=is_master)
... dist_history = DistributedHistory(
... store=store, rank=rank, world_size=world_size)
... net = AcceleratedNet(..., history=dist_history)
... net.fit(X, y)
Parameters
----------
store : torch.distributed.Store
The torch distributed ``Store`` instance,
:class:`torch.distributed.TCPStore` has been tested to work.
rank : int
The rank of this particular process among all processes. Each process
should have a unique rank between 0 and ``world_size`` - 1. If using
``accelerate``, the rank can be determined as
``accelerator.local_process_index``.
world_size : int
The number of processes in the training. When using ``accelerate``, the
world size can be determined as ``accelerator.num_processes``.
Attributes
----------
history : skorch.history.History
The actual skorch ``History`` object can be accessed using the
:class:`.History` attribute. You should call ``net.history.sync()`` to
ensure that all data is synced into the history before reading from it.
"""
def __init__(self, *args, store, rank, world_size):
self.store = store
self.rank = rank
self.world_size = world_size
self._history = History(*args)
self._cur_batch = 0
self._sync_queue = []
@property
def history(self):
self.sync()
return self._history
def to_list(self):
return self.history.to_list()
@classmethod
def from_file(cls, f):
raise NotImplementedError
def to_file(self, f):
return self.history.to_file(f)
def __len__(self):
return len(self.history)
def __delitem__(self, i):
raise NotImplementedError
def clear(self):
self.history.clear()
def new_epoch(self):
self.sync()
self._history.new_epoch()
self._cur_batch = 0
def new_batch(self):
self.sync()
self._history.new_batch()
self._cur_batch += 1
def record(self, attr, value):
self._history.record(attr, value)
def record_batch(self, attr, value):
"""Add a new value to the given column for the current
batch.
Instead of writing to the history directly, write to the distributed
store. Then, once the history is being read from, the values from the
store are synchronized.
This class "remembers" which values were written to the store by
creating a key that uniquely identifies the values. Choosing a correct
key is crucial here, since it must not only be unique (say, a uuid), but
also sufficient to replay the history so that they can be recorded
correctly.
When the structure of the key is changed, it must be changed accordingly
inside of the sync method.
"""
rank = self.rank
batch = self._cur_batch
epoch = len(self._history)
key = [epoch, batch, rank, attr]
self.store.set(json.dumps(key), json.dumps(value))
# the queue not only stores the keys for this process, but the keys for
# all processes
for rank in range(self.world_size):
key = [epoch, batch, rank, attr]
self._sync_queue.append(key)
def __getitem__(self, i):
self.sync()
return self._history[i]
def sync(self):
"""Collect batch records across all ranks from store and write them to
the history
Syncing is not a single atomic operation, if something breaks the flow,
we can end up with an inconsistent state.
"""
if not self._sync_queue:
return
# this ensures that that the values are visited in a stable order
# grouped by batches: order by batch count and within the batches in the
# order of the rank and within the ranks in the alphabetical order of
# the attributes
keys_sorted = sorted(self._sync_queue)
keys_json = [json.dumps(key) for key in keys_sorted]
self.store.wait(keys_json)
batch_prev = None
rank_prev = None
for (_, batch, rank, attr), key_json in zip(keys_sorted, keys_json):
if batch_prev is None:
batch_prev = batch
if rank_prev is None:
rank_prev = rank
# each time the batch counter or rank changes, it means that the
# next batch needs to be started
if (batch != batch_prev) or (rank != rank_prev):
self._history.new_batch()
batch_prev = batch
rank_prev = rank
val = json.loads(self.store.get(key_json))
self._history.record_batch(attr, val)
self._sync_queue.clear()
def __getstate__(self):
state = self.__dict__.copy()
try:
# Unfortunately, TCPStore and FileStore are not pickleable. We still
# try, in case the user provides another type that can be pickled.
pickle.dumps(state['store'])
except (TypeError, pickle.PicklingError):
# TCPStore and FileStore raise TypeError, others could be
# PicklingError
state['store'] = None
return state
| DistributedHistory |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/common.py | {
"start": 8165,
"end": 13183
} | class ____(
gym.Wrapper[ObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs
):
"""A passive wrapper that surrounds the ``step``, ``reset`` and ``render`` functions to check they follow Gymnasium's API.
This wrapper is automatically applied during make and can be disabled with `disable_env_checker`.
No vector version of the wrapper exists.
Example:
>>> import gymnasium as gym
>>> env = gym.make("CartPole-v1")
>>> env
<TimeLimit<OrderEnforcing<PassiveEnvChecker<CartPoleEnv<CartPole-v1>>>>>
>>> env = gym.make("CartPole-v1", disable_env_checker=True)
>>> env
<TimeLimit<OrderEnforcing<CartPoleEnv<CartPole-v1>>>>
Change logs:
* v0.24.1 - Initially added however broken in several ways
* v0.25.0 - Bugs was all fixed
* v0.29.0 - Removed warnings for infinite bounds for Box observation and action spaces and inregular bound shapes
"""
def __init__(self, env: gym.Env[ObsType, ActType]):
"""Initialises the wrapper with the environments, run the observation and action space tests."""
gym.utils.RecordConstructorArgs.__init__(self)
gym.Wrapper.__init__(self, env)
if not isinstance(env, gym.Env):
if str(env.__class__.__base__) == "<class 'gym.core.Env'>":
raise TypeError(
"Gym is incompatible with Gymnasium, please update the environment class to `gymnasium.Env`. "
"See https://gymnasium.farama.org/introduction/create_custom_env/ for more info."
)
else:
raise TypeError(
f"The environment must inherit from the gymnasium.Env class, actual class: {type(env)}. "
"See https://gymnasium.farama.org/introduction/create_custom_env/ for more info."
)
if not hasattr(env, "action_space"):
raise AttributeError(
"The environment must specify an action space. https://gymnasium.farama.org/introduction/create_custom_env/"
)
check_action_space(env.action_space)
if not hasattr(env, "observation_space"):
raise AttributeError(
"The environment must specify an observation space. https://gymnasium.farama.org/introduction/create_custom_env/"
)
check_observation_space(env.observation_space)
self.checked_reset: bool = False
self.checked_step: bool = False
self.checked_render: bool = False
self.close_called: bool = False
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Steps through the environment that on the first call will run the `passive_env_step_check`."""
if self.checked_step is False:
self.checked_step = True
return env_step_passive_checker(self.env, action)
else:
return self.env.step(action)
def reset(
self, *, seed: int | None = None, options: dict[str, Any] | None = None
) -> tuple[ObsType, dict[str, Any]]:
"""Resets the environment that on the first call will run the `passive_env_reset_check`."""
if self.checked_reset is False:
self.checked_reset = True
return env_reset_passive_checker(self.env, seed=seed, options=options)
else:
return self.env.reset(seed=seed, options=options)
def render(self) -> RenderFrame | list[RenderFrame] | None:
"""Renders the environment that on the first call will run the `passive_env_render_check`."""
if self.checked_render is False:
self.checked_render = True
return env_render_passive_checker(self.env)
else:
return self.env.render()
@property
def spec(self) -> EnvSpec | None:
"""Modifies the environment spec to such that `disable_env_checker=False`."""
if self._cached_spec is not None:
return self._cached_spec
env_spec = self.env.spec
if env_spec is not None:
try:
env_spec = deepcopy(env_spec)
env_spec.disable_env_checker = False
except Exception as e:
gym.logger.warn(
f"An exception occurred ({e}) while copying the environment spec={env_spec}"
)
return None
self._cached_spec = env_spec
return env_spec
def close(self):
"""Warns if calling close on a closed environment fails."""
if not self.close_called:
self.close_called = True
return self.env.close()
else:
try:
return self.env.close()
except Exception as e:
logger.warn(
"Calling `env.close()` on the closed environment should be allowed, but it raised the following exception."
)
raise e
| PassiveEnvChecker |
python | django__django | tests/migrations/test_migrations_backwards_deps_1/0001_initial.py | {
"start": 35,
"end": 94
} | class ____(migrations.Migration):
operations = []
| Migration |
python | django__django | django/core/management/commands/migrate.py | {
"start": 671,
"end": 21359
} | class ____(BaseCommand):
autodetector = MigrationAutodetector
help = (
"Updates database schema. Manages both apps with migrations and those without."
)
def add_arguments(self, parser):
parser.add_argument(
"app_label",
nargs="?",
help="App label of an application to synchronize the state.",
)
parser.add_argument(
"migration_name",
nargs="?",
help="Database state will be brought to the state after that "
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
choices=tuple(connections),
help=(
'Nominates a database to synchronize. Defaults to the "default" '
"database."
),
)
parser.add_argument(
"--fake",
action="store_true",
help="Mark migrations as run without actually running them.",
)
parser.add_argument(
"--fake-initial",
action="store_true",
help=(
"Detect if tables already exist and fake-apply initial migrations if "
"so. Make sure that the current database schema matches your initial "
"migration before using this flag. Django will only check for an "
"existing table name."
),
)
parser.add_argument(
"--plan",
action="store_true",
help="Shows a list of the migration actions that will be performed.",
)
parser.add_argument(
"--run-syncdb",
action="store_true",
help="Creates tables for apps without migrations.",
)
parser.add_argument(
"--check",
action="store_true",
dest="check_unapplied",
help=(
"Exits with a non-zero status if unapplied migrations exist and does "
"not actually apply migrations."
),
)
parser.add_argument(
"--prune",
action="store_true",
dest="prune",
help="Delete nonexistent migrations from the django_migrations table.",
)
def get_check_kwargs(self, options):
kwargs = super().get_check_kwargs(options)
return {**kwargs, "databases": [options["database"]]}
@no_translations
def handle(self, *args, **options):
database = options["database"]
self.verbosity = options["verbosity"]
self.interactive = options["interactive"]
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module(".management", app_config.name)
# Get the database we're operating from
connection = connections[database]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their
# dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app) for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
run_syncdb = options["run_syncdb"]
target_app_labels_only = True
if options["app_label"]:
# Validate app_label.
app_label = options["app_label"]
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
if run_syncdb:
if app_label in executor.loader.migrated_apps:
raise CommandError(
"Can't use run_syncdb with app '%s' as it has migrations."
% app_label
)
elif app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations." % app_label)
if options["app_label"] and options["migration_name"]:
migration_name = options["migration_name"]
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(
app_label, migration_name
)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." % (migration_name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'."
% (migration_name, app_label)
)
target = (app_label, migration.name)
# Partially applied squashed migrations are not included in the
# graph, use the last replacement instead.
if (
target not in executor.loader.graph.nodes
and target in executor.loader.replacements
):
incomplete_migration = executor.loader.replacements[target]
target = incomplete_migration.replaces[-1]
targets = [target]
target_app_labels_only = False
elif options["app_label"]:
targets = [
key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label
]
else:
targets = executor.loader.graph.leaf_nodes()
if options["prune"]:
if not options["app_label"]:
raise CommandError(
"Migrations can be pruned only when an app is specified."
)
if self.verbosity > 0:
self.stdout.write("Pruning migrations:", self.style.MIGRATE_HEADING)
to_prune = sorted(
migration
for migration in set(executor.loader.applied_migrations)
- set(executor.loader.disk_migrations)
if migration[0] == app_label
)
squashed_migrations_with_deleted_replaced_migrations = [
migration_key
for migration_key, migration_obj in executor.loader.replacements.items()
if any(replaced in to_prune for replaced in migration_obj.replaces)
]
if squashed_migrations_with_deleted_replaced_migrations:
self.stdout.write(
self.style.NOTICE(
" Cannot use --prune because the following squashed "
"migrations have their 'replaces' attributes and may not "
"be recorded as applied:"
)
)
for migration in squashed_migrations_with_deleted_replaced_migrations:
app, name = migration
self.stdout.write(f" {app}.{name}")
self.stdout.write(
self.style.NOTICE(
" Re-run 'manage.py migrate' if they are not marked as "
"applied, and remove 'replaces' attributes in their "
"Migration classes."
)
)
else:
if to_prune:
for migration in to_prune:
app, name = migration
if self.verbosity > 0:
self.stdout.write(
self.style.MIGRATE_LABEL(f" Pruning {app}.{name}"),
ending="",
)
executor.recorder.record_unapplied(app, name)
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(" OK"))
elif self.verbosity > 0:
self.stdout.write(" No migrations to prune.")
plan = executor.migration_plan(targets)
if options["plan"]:
self.stdout.write("Planned operations:", self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(" No planned migration operations.")
else:
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(
operation, backwards
)
style = self.style.WARNING if is_error else None
self.stdout.write(" " + message, style)
if options["check_unapplied"]:
sys.exit(1)
return
if options["check_unapplied"]:
if plan:
sys.exit(1)
return
if options["prune"]:
return
# At this point, ignore run_syncdb if there aren't any apps to sync.
run_syncdb = options["run_syncdb"] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
if options["app_label"]:
self.stdout.write(
self.style.MIGRATE_LABEL(
" Synchronize unmigrated app: %s" % app_label
)
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ")
+ (", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ")
+ (", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(
self.style.MIGRATE_LABEL(" Unapply all migrations: ")
+ str(targets[0][0])
)
else:
self.stdout.write(
self.style.MIGRATE_LABEL(" Target specific migration: ")
+ "%s, from %s" % (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity,
self.interactive,
connection.alias,
stdout=self.stdout,
apps=pre_migrate_apps,
plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(
self.style.MIGRATE_HEADING("Synchronizing apps without migrations:")
)
if options["app_label"]:
self.sync_apps(connection, [app_label])
else:
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there's changes that aren't in migrations yet, tell them
# how to fix it.
autodetector = self.autodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(
self.style.NOTICE(
" Your models in app(s): %s have changes that are not "
"yet reflected in a migration, and so won't be "
"applied." % ", ".join(repr(app) for app in sorted(changes))
)
)
self.stdout.write(
self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
)
)
fake = False
fake_initial = False
else:
fake = options["fake"]
fake_initial = options["fake_initial"]
post_migrate_state = executor.migrate(
targets,
plan=plan,
state=pre_migrate_state.clone(),
fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all
# models are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple(
[ModelState.from_model(apps.get_model(*model)) for model in model_keys]
)
# Send the post_migrate signal, so individual apps can do whatever they
# need to do at this point.
emit_post_migrate_signal(
self.verbosity,
self.interactive,
connection.alias,
stdout=self.stdout,
apps=post_migrate_apps,
plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.monotonic()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = (
" (%.3fs)" % (time.monotonic() - self.start) if compute_time else ""
)
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(
app_config, connection.alias, include_auto_created=False
),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.identifier_converter
return not (
(converter(opts.db_table) in tables)
or (
opts.auto_created
and converter(opts.auto_created._meta.db_table) in tables
)
)
manifest = {
app_name: list(filter(model_installed, model_list))
for app_name, model_list in all_models
}
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model"
% (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(
" Creating table %s" % model._meta.db_table
)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...")
@staticmethod
def describe_operation(operation, backwards):
"""Return a string that describes a migration operation for --plan."""
prefix = ""
is_error = False
if hasattr(operation, "code"):
code = operation.reverse_code if backwards else operation.code
action = (code.__doc__ or "") if code else None
elif hasattr(operation, "sql"):
action = operation.reverse_sql if backwards else operation.sql
else:
action = ""
if backwards:
prefix = "Undo "
if action is not None:
action = str(action).replace("\n", "")
elif backwards:
action = "IRREVERSIBLE"
is_error = True
if action:
action = " -> " + action
truncated = Truncator(action)
return prefix + operation.describe() + truncated.chars(40), is_error
| Command |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/tests/test_credentials.py | {
"start": 7791,
"end": 8413
} | class ____(Block):
credentials: GcpCredentials
def get_configs(self):
"""
Returns the dbt configs, likely used eventually for writing to profiles.yml.
Returns:
A configs JSON.
"""
configs = self.credentials.model_dump()
for key in Block().model_dump():
configs.pop(key, None)
for key in configs.copy():
if key.startswith("_"):
configs.pop(key)
elif hasattr(configs[key], "get_secret_value"):
configs[key] = configs[key].get_secret_value()
return configs
| MockTargetConfigs |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_directive.py | {
"start": 776,
"end": 1649
} | class ____(dict[str, 'Callable[[str], str]']): # NoQA: FURB189
"""An option_spec allows any options."""
def __bool__(self) -> bool:
"""Behaves like some options are defined."""
return True
def __getitem__(self, _key: str) -> Callable[[str], str]:
return lambda x: x
def parse_generated_content(
state: RSTState, content: StringList, titles_allowed: bool
) -> list[Node]:
"""Parse an item of content generated by _auto_document_object()."""
with switch_source_input(state, content):
if titles_allowed:
return nested_parse_to_nodes(state, content)
node = nodes.paragraph()
# necessary so that the child nodes get the right source/line set
node.document = state.document
state.nested_parse(content, 0, node, match_titles=False)
return node.children
| DummyOptionSpec |
python | kamyu104__LeetCode-Solutions | Python/my-calendar-i.py | {
"start": 66,
"end": 663
} | class ____(object):
def __init__(self, start, end):
self.__start = start
self.__end = end
self.__left = None
self.__right = None
def insert(self, node):
if node.__start >= self.__end:
if not self.__right:
self.__right = node
return True
return self.__right.insert(node)
elif node.__end <= self.__start:
if not self.__left:
self.__left = node
return True
return self.__left.insert(node)
else:
return False
| Node |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/requests/main.py | {
"start": 1225,
"end": 1546
} | class ____(webapp2.RequestHandler):
def get(self):
self.response.headers["Content-Type"] = "text/plain"
for key, value in os.environ.iteritems():
self.response.out.write("{} = {}\n".format(key, value))
# [END gae_python_environment]
# [START gae_python_request_ids]
| PrintEnvironmentHandler |
python | langchain-ai__langchain | libs/core/langchain_core/tools/base.py | {
"start": 1843,
"end": 8064
} | class ____(TypeError):
"""Raised when args_schema is missing or has an incorrect type annotation."""
def _is_annotated_type(typ: type[Any]) -> bool:
"""Check if a type is an Annotated type.
Args:
typ: The type to check.
Returns:
`True` if the type is an Annotated type, `False` otherwise.
"""
return get_origin(typ) is typing.Annotated
def _get_annotation_description(arg_type: type) -> str | None:
"""Extract description from an Annotated type.
Args:
arg_type: The type to extract description from.
Returns:
The description string if found, None otherwise.
"""
if _is_annotated_type(arg_type):
annotated_args = get_args(arg_type)
for annotation in annotated_args[1:]:
if isinstance(annotation, str):
return annotation
return None
def _get_filtered_args(
inferred_model: type[BaseModel],
func: Callable,
*,
filter_args: Sequence[str],
include_injected: bool = True,
) -> dict:
"""Get filtered arguments from a function's signature.
Args:
inferred_model: The Pydantic model inferred from the function.
func: The function to extract arguments from.
filter_args: Arguments to exclude from the result.
include_injected: Whether to include injected arguments.
Returns:
Dictionary of filtered arguments with their schema definitions.
"""
schema = inferred_model.model_json_schema()["properties"]
valid_keys = signature(func).parameters
return {
k: schema[k]
for i, (k, param) in enumerate(valid_keys.items())
if k not in filter_args
and (i > 0 or param.name not in {"self", "cls"})
and (include_injected or not _is_injected_arg_type(param.annotation))
}
def _parse_python_function_docstring(
function: Callable, annotations: dict, *, error_on_invalid_docstring: bool = False
) -> tuple[str, dict]:
"""Parse function and argument descriptions from a docstring.
Assumes the function docstring follows Google Python style guide.
Args:
function: The function to parse the docstring from.
annotations: Type annotations for the function parameters.
error_on_invalid_docstring: Whether to raise an error on invalid docstring.
Returns:
A tuple containing the function description and argument descriptions.
"""
docstring = inspect.getdoc(function)
return _parse_google_docstring(
docstring,
list(annotations),
error_on_invalid_docstring=error_on_invalid_docstring,
)
def _validate_docstring_args_against_annotations(
arg_descriptions: dict, annotations: dict
) -> None:
"""Validate that docstring arguments match function annotations.
Args:
arg_descriptions: Arguments described in the docstring.
annotations: Type annotations from the function signature.
Raises:
ValueError: If a docstring argument is not found in function signature.
"""
for docstring_arg in arg_descriptions:
if docstring_arg not in annotations:
msg = f"Arg {docstring_arg} in docstring not found in function signature."
raise ValueError(msg)
def _infer_arg_descriptions(
fn: Callable,
*,
parse_docstring: bool = False,
error_on_invalid_docstring: bool = False,
) -> tuple[str, dict]:
"""Infer argument descriptions from function docstring and annotations.
Args:
fn: The function to infer descriptions from.
parse_docstring: Whether to parse the docstring for descriptions.
error_on_invalid_docstring: Whether to raise error on invalid docstring.
Returns:
A tuple containing the function description and argument descriptions.
"""
annotations = typing.get_type_hints(fn, include_extras=True)
if parse_docstring:
description, arg_descriptions = _parse_python_function_docstring(
fn, annotations, error_on_invalid_docstring=error_on_invalid_docstring
)
else:
description = inspect.getdoc(fn) or ""
arg_descriptions = {}
if parse_docstring:
_validate_docstring_args_against_annotations(arg_descriptions, annotations)
for arg, arg_type in annotations.items():
if arg in arg_descriptions:
continue
if desc := _get_annotation_description(arg_type):
arg_descriptions[arg] = desc
return description, arg_descriptions
def _is_pydantic_annotation(annotation: Any, pydantic_version: str = "v2") -> bool:
"""Check if a type annotation is a Pydantic model.
Args:
annotation: The type annotation to check.
pydantic_version: The Pydantic version to check against ("v1" or "v2").
Returns:
`True` if the annotation is a Pydantic model, `False` otherwise.
"""
base_model_class = BaseModelV1 if pydantic_version == "v1" else BaseModel
try:
return issubclass(annotation, base_model_class)
except TypeError:
return False
def _function_annotations_are_pydantic_v1(
signature: inspect.Signature, func: Callable
) -> bool:
"""Check if all Pydantic annotations in a function are from V1.
Args:
signature: The function signature to check.
func: The function being checked.
Returns:
True if all Pydantic annotations are from V1, `False` otherwise.
Raises:
NotImplementedError: If the function contains mixed V1 and V2 annotations.
"""
any_v1_annotations = any(
_is_pydantic_annotation(parameter.annotation, pydantic_version="v1")
for parameter in signature.parameters.values()
)
any_v2_annotations = any(
_is_pydantic_annotation(parameter.annotation, pydantic_version="v2")
for parameter in signature.parameters.values()
)
if any_v1_annotations and any_v2_annotations:
msg = (
f"Function {func} contains a mix of Pydantic v1 and v2 annotations. "
"Only one version of Pydantic annotations per function is supported."
)
raise NotImplementedError(msg)
return any_v1_annotations and not any_v2_annotations
| SchemaAnnotationError |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 40227,
"end": 58257
} | class ____(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters",
            but not to be confused with "URL wildcards" as they are provided
            by the :class:`Router`. '''
        # Also stored under 'bottle.get' — presumably a legacy key kept for
        # backward compatibility; TODO confirm before removing.
        get = self.environ['bottle.get'] = FormsDict()
        pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
        for key, value in pairs:
            get[key] = value
        return get
    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        # POST holds both plain values and FileUpload objects; keep only the
        # plain form fields here.
        for name, item in self.POST.allitems():
            if not isinstance(item, FileUpload):
                forms[name] = item
        return forms
    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        # Query-string values are inserted first, body values second.
        for key, value in self.query.allitems():
            params[key] = value
        for key, value in self.forms.allitems():
            params[key] = value
        return params
    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from `multipart/form-data` encoded POST or PUT
            request body. The values are instances of :class:`FileUpload`.
        """
        files = FormsDict()
        # Mirror image of :attr:`forms`: keep only the FileUpload entries.
        for name, item in self.POST.allitems():
            if isinstance(item, FileUpload):
                files[name] = item
        return files
    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only
            requests smaller than :attr:`MEMFILE_MAX` are processed to avoid
            memory exhaustion. Returns ``None`` for other content types or
            an empty body. '''
        # Strip media-type parameters (e.g. '; charset=utf-8') before comparing.
        ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
        if ctype == 'application/json':
            b = self._get_body_string()
            if not b:
                return None
            return json_loads(b)
        return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
    def _iter_chunked(self, read, bufsize):
        # Decode an HTTP "chunked" transfer-encoded body and yield the raw
        # payload in pieces of at most *bufsize* bytes.
        err = HTTPError(400, 'Error while parsing chunked transfer body.')
        # CRLF terminator, extension separator, and an empty bytes buffer.
        rn, sem, bs = tob('\r\n'), tob(';'), tob('')
        while True:
            header = read(1)
            # Read the chunk-size line byte by byte until CRLF is seen.
            while header[-2:] != rn:
                c = read(1)
                header += c
                if not c: raise err
                if len(header) > bufsize: raise err
            # Anything after ';' is a chunk extension and is ignored.
            size, _, _ = header.partition(sem)
            try:
                maxread = int(tonat(size.strip()), 16)  # chunk size is hex
            except ValueError:
                raise err
            if maxread == 0: break  # a zero-size chunk terminates the body
            buff = bs
            while maxread > 0:
                if not buff:
                    buff = read(min(maxread, bufsize))
                part, buff = buff[:maxread], buff[maxread:]
                if not part: raise err
                yield part
                maxread -= len(part)
            # Every chunk payload is followed by a CRLF.
            if read(2) != rn:
                raise err
    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Read the entire request body into memory, spilling to a temporary
        # file once it grows beyond MEMFILE_MAX bytes. The resulting file-like
        # object replaces wsgi.input and is cached by DictProperty under
        # 'bottle.request.body'.
        body_iter = self._iter_chunked if self.chunked else self._iter_body
        read_func = self.environ['wsgi.input'].read
        body, body_size, is_temp_file = BytesIO(), 0, False
        for part in body_iter(read_func, self.MEMFILE_MAX):
            body.write(part)
            body_size += len(part)
            if not is_temp_file and body_size > self.MEMFILE_MAX:
                # Too big for memory: switch to a disk-backed buffer once.
                body, tmp = TemporaryFile(mode='w+b'), body
                body.write(tmp.getvalue())
                del tmp
                is_temp_file = True
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body
def _get_body_string(self):
''' read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request to large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request to large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
    @property
    def chunked(self):
        ''' True if Chunked transfer encoding was used for the request body. '''
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
    #: An alias for :attr:`query` (the parsed "GET" parameters).
    GET = query
    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path (also: 3.1 workaround)
        if not self.content_type.startswith('multipart/'):
            pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
            for key, value in pairs:
                post[key] = value
            return post
        # Multipart: delegate parsing to cgi.FieldStorage, feeding it a
        # minimal environment so unrelated WSGI keys cannot confuse it.
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
        if py31:
            # Python 3.1 requires a text-mode wrapper around the body stream.
            args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
                                         newline='\n')
        elif py3k:
            args['encoding'] = 'utf8'
        data = cgi.FieldStorage(**args)
        # Keep a reference so the parsed data is not garbage collected early.
        self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
        data = data.list or []
        for item in data:
            if item.filename:
                post[item.name] = FileUpload(item.file, item.name,
                                             item.filename, item.headers)
            else:
                post[item.name] = item.value
        return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        # Prefer X-Forwarded-* headers set by reverse proxies over the raw
        # WSGI values.
        http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Append the port only when it is not the scheme's default.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and tailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.
           :param shift: The number of path segments to shift. May be negative
                         to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        # Delegates to the module-level path_shift() helper; assignment goes
        # through self.__setitem__ so the read-only guard applies.
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
''' The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. ("Ajax" is not the right term for the
            underlying mechanism; prefer :attr:`is_xhr`.) '''
        return self.is_xhr
    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        # Fall back to a user authenticated upstream; no password available.
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This does only
work if all proxies support the ```X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())
    # Dict-like read access to the underlying WSGI environ.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    # Assigning '' first routes through __setitem__, clearing dependent caches.
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')
        self.environ[key] = value
        todelete = ()
        # Map the changed environ key to the cached properties derived from it.
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')
        # Drop the cached values so the properties are recomputed on access.
        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)
    def __repr__(self):
        # e.g. <BaseRequest: GET http://example.com/>
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Honor the descriptor protocol for stored objects.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)
    def __setattr__(self, name, value):
        # 'environ' is the only real instance attribute; anything else is
        # stored inside the environ dict under a namespaced key.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(key):
    # Reject header names containing CR/LF/NUL to prevent header injection,
    # then normalize to canonical HTTP form (Title-Case, '-' separators).
    for forbidden in ('\n', '\r', '\0'):
        if forbidden in key:
            raise ValueError("Header names must not contain control characters: %r" % key)
    return key.title().replace('_', '-')
def _hval(value):
    # Coerce to a native string, then reject CR/LF/NUL to prevent header
    # injection through response header values.
    value = tonat(value)
    for forbidden in ('\n', '\r', '\0'):
        if forbidden in value:
            raise ValueError("Header value must not contain control characters: %r" % value)
    return value
| BaseRequest |
python | doocs__leetcode | solution/3100-3199/3131.Find the Integer Added to Array I/Solution.py | {
"start": 0,
"end": 126
} | class ____:
def addedInteger(self, nums1: List[int], nums2: List[int]) -> int:
return min(nums2) - min(nums1)
| Solution |
python | ansible__ansible | test/integration/targets/templating/test_plugins/broken_test.py | {
"start": 134,
"end": 232
} | class ____:
def tests(self):
return {
'broken': Broken(),
}
| TestModule |
python | python-openxml__python-docx | tests/test_document.py | {
"start": 13481,
"end": 14353
} | class ____:
"""Unit-test suite for `docx.document._Body`."""
@pytest.mark.parametrize(
("cxml", "expected_cxml"),
[
("w:body", "w:body"),
("w:body/w:p", "w:body"),
("w:body/w:sectPr", "w:body/w:sectPr"),
("w:body/(w:p, w:sectPr)", "w:body/w:sectPr"),
],
)
def it_can_clear_itself_of_all_content_it_holds(
self, cxml: str, expected_cxml: str, document_: Mock
):
body = _Body(cast(CT_Body, element(cxml)), document_)
_body = body.clear_content()
assert body._body.xml == xml(expected_cxml)
assert _body is body
# -- fixtures --------------------------------------------------------------------------------
@pytest.fixture
def document_(self, request: FixtureRequest):
return instance_mock(request, Document)
| Describe_Body |
python | Textualize__textual | docs/examples/styles/max_width.py | {
"start": 116,
"end": 510
} | class ____(App):
CSS_PATH = "max_width.tcss"
def compose(self):
yield VerticalScroll(
Placeholder("max-width: 50h", id="p1"),
Placeholder("max-width: 999", id="p2"),
Placeholder("max-width: 50%", id="p3"),
Placeholder("max-width: 30", id="p4"),
)
if __name__ == "__main__":
app = MaxWidthApp()
app.run()
| MaxWidthApp |
python | kamyu104__LeetCode-Solutions | Python/make-sum-divisible-by-p.py | {
"start": 29,
"end": 586
} | class ____(object):
def minSubarray(self, nums, p):
"""
:type nums: List[int]
:type p: int
:rtype: int
"""
residue = sum(nums) % p
if not residue:
return 0
result = len(nums)
curr, lookup = 0, {0: -1}
for i, num in enumerate(nums):
curr = (curr+num) % p
lookup[curr] = i
if (curr-residue) % p in lookup:
result = min(result, i-lookup[(curr-residue)%p])
return result if result < len(nums) else -1
| Solution |
python | huggingface__transformers | src/transformers/models/sam2_video/modular_sam2_video.py | {
"start": 43623,
"end": 46082
} | class ____(Sam2ImageSegmentationOutput):
r"""
iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`):
The Intersection over Union (IoU) scores of the predicted masks.
pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`):
The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed
by the processor to be brought to the original image size.
object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`):
Logits for the object score, indicating if an object is present.
image_embeddings (`tuple(torch.FloatTensor)`):
The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each
tensor has shape `(batch_size, channels, height, width)`.
vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`.
Hidden-states of the vision model at the output of each stage.
vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the vision model.
mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights of the mask decoder.
high_res_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, image_size, image_size)`, *optional*):
The predicted masks, upscaled to the original image size. Only used for Sam2VideoModel.
object_pointer (`torch.FloatTensor` of shape `(batch_size, point_batch_size, hidden_size)`, *optional*):
A tensor representing the object pointer, used for tracking in videos. Only used for Sam2VideoModel.
"""
high_res_masks: Optional[torch.FloatTensor] = None
object_pointer: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(custom_intro="Base class for the Sam2 model's output.")
| Sam2VideoImageSegmentationOutput |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 1527,
"end": 1613
} | class ____(ASTNode):
keys: List[ASTNode]
values: List[ASTNode]
@dataclass
| VyDict |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/serve.py | {
"start": 25720,
"end": 25806
} | class ____(SettingsOverrideObject):
_default_class = ServeError404Base
| ServeError404 |
python | pytorch__pytorch | torch/_dynamo/aot_compile_types.py | {
"start": 64,
"end": 432
} | class ____(abc.ABC):
@classmethod
@abc.abstractmethod
def serialize_compile_artifacts(cls, fn: Any) -> bytes:
pass
@classmethod
@abc.abstractmethod
def deserialize_compile_artifacts(cls, data: bytes) -> Any:
pass
@abc.abstractmethod
def __call__(self, *args: Any, **kwargs: Any) -> Any:
pass
| SerializableCallable |
python | apache__airflow | airflow-core/src/airflow/cli/simple_table.py | {
"start": 4709,
"end": 5533
} | class ____(Table):
"""A rich Table with some default hardcoded for consistency."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.show_edge = kwargs.get("show_edge", False)
self.pad_edge = kwargs.get("pad_edge", False)
self.box = kwargs.get("box", ASCII_DOUBLE_HEAD)
self.show_header = kwargs.get("show_header", False)
self.title_style = kwargs.get("title_style", "bold green")
self.title_justify = kwargs.get("title_justify", "left")
self.caption = kwargs.get("caption", " ")
def add_column(self, *args, **kwargs) -> None:
"""Add a column to the table. We use different default."""
kwargs["overflow"] = kwargs.get("overflow") # to avoid truncating
super().add_column(*args, **kwargs)
| SimpleTable |
python | pydata__xarray | xarray/core/datatree.py | {
"start": 7388,
"end": 15630
} | class ____(Dataset):
"""
An immutable Dataset-like view onto the data in a single DataTree node.
In-place operations modifying this object should raise an AttributeError.
This requires overriding all inherited constructors.
Operations returning a new result will return a new xarray.Dataset object.
This includes all API on Dataset, which will be inherited.
"""
# TODO what happens if user alters (in-place) a DataArray they extracted from this object?
__slots__ = (
"_attrs",
"_cache", # used by _CachedAccessor
"_close",
"_coord_names",
"_dims",
"_encoding",
"_indexes",
"_variables",
)
def __init__(
self,
data_vars: Mapping[Any, Any] | None = None,
coords: Mapping[Any, Any] | None = None,
attrs: Mapping[Any, Any] | None = None,
):
raise AttributeError("DatasetView objects are not to be initialized directly")
@classmethod
def _constructor(
cls,
variables: dict[Any, Variable],
coord_names: set[Hashable],
dims: dict[Any, int],
attrs: dict | None,
indexes: dict[Any, Index],
encoding: dict | None,
close: Callable[[], None] | None,
) -> DatasetView:
"""Private constructor, from Dataset attributes."""
# We override Dataset._construct_direct below, so we need a new
# constructor for creating DatasetView objects.
obj: DatasetView = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._indexes = indexes
obj._attrs = attrs
obj._close = close
obj._encoding = encoding
return obj
def __setitem__(self, key, val) -> None:
raise AttributeError(
"Mutation of the DatasetView is not allowed, please use `.__setitem__` on the wrapping DataTree node, "
"or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`,"
"use `.copy()` first to get a mutable version of the input dataset."
)
def update(self, other) -> NoReturn:
raise AttributeError(
"Mutation of the DatasetView is not allowed, please use `.update` on the wrapping DataTree node, "
"or use `dt.to_dataset()` if you want a mutable dataset. If calling this from within `map_over_datasets`,"
"use `.copy()` first to get a mutable version of the input dataset."
)
def set_close(self, close: Callable[[], None] | None) -> None:
raise AttributeError("cannot modify a DatasetView()")
def close(self) -> None:
raise AttributeError(
"cannot close a DatasetView(). Close the associated DataTree node instead"
)
# FIXME https://github.com/python/mypy/issues/7328
@overload # type: ignore[override]
def __getitem__(self, key: Mapping) -> Dataset: # type: ignore[overload-overlap]
...
@overload
def __getitem__(self, key: Hashable) -> DataArray: ...
# See: https://github.com/pydata/xarray/issues/8855
@overload
def __getitem__(self, key: Any) -> Dataset: ...
def __getitem__(self, key) -> DataArray | Dataset:
# TODO call the `_get_item` method of DataTree to allow path-like access to contents of other nodes
# For now just call Dataset.__getitem__
return Dataset.__getitem__(self, key)
@classmethod
def _construct_direct( # type: ignore[override]
cls,
variables: dict[Any, Variable],
coord_names: set[Hashable],
dims: dict[Any, int] | None = None,
attrs: dict | None = None,
indexes: dict[Any, Index] | None = None,
encoding: dict | None = None,
close: Callable[[], None] | None = None,
) -> Dataset:
"""
Overriding this method (along with ._replace) and modifying it to return a Dataset object
should hopefully ensure that the return type of any method on this object is a Dataset.
"""
if dims is None:
dims = calculate_dimensions(variables)
if indexes is None:
indexes = {}
obj = object.__new__(Dataset)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._indexes = indexes
obj._attrs = attrs
obj._close = close
obj._encoding = encoding
return obj
def _replace( # type: ignore[override]
self,
variables: dict[Hashable, Variable] | None = None,
coord_names: set[Hashable] | None = None,
dims: dict[Any, int] | None = None,
attrs: dict[Hashable, Any] | Default | None = _default,
indexes: dict[Hashable, Index] | None = None,
encoding: dict | Default | None = _default,
inplace: bool = False,
) -> Dataset:
"""
Overriding this method (along with ._construct_direct) and modifying it to return a Dataset object
should hopefully ensure that the return type of any method on this object is a Dataset.
"""
if inplace:
raise AttributeError("In-place mutation of the DatasetView is not allowed")
return Dataset._replace(
self,
variables=variables,
coord_names=coord_names,
dims=dims,
attrs=attrs,
indexes=indexes,
encoding=encoding,
inplace=inplace,
)
def map( # type: ignore[override]
self,
func: Callable,
keep_attrs: bool | None = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> Dataset:
"""Apply a function to each data variable in this dataset
Parameters
----------
func : callable
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool | None, optional
If True, both the dataset's and variables' attributes (`attrs`) will be
copied from the original objects to the new ones. If False, the new dataset
and variables will be returned without copying the attributes.
args : iterable, optional
Positional arguments passed on to `func`.
**kwargs : Any
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` to each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])})
>>> ds
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 16B -1 2
>>> ds.map(np.fabs)
<xarray.Dataset> Size: 64B
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 48B 1.764 0.4002 0.9787 2.241 1.868 0.9773
bar (x) float64 16B 1.0 2.0
"""
# Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188).
# TODO Refactor xarray upstream to avoid needing to overwrite this.
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
variables = {
k: maybe_wrap_array(v, func(v, *args, **kwargs))
for k, v in self.data_vars.items()
}
if keep_attrs:
for k, v in variables.items():
v._copy_attrs_from(self.data_vars[k])
attrs = self.attrs if keep_attrs else None
# return type(self)(variables, attrs=attrs)
return Dataset(variables, attrs=attrs)
FromDictDataValue: TypeAlias = "CoercibleValue | Dataset | DataTree | None"
@dataclass
| DatasetView |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 74678,
"end": 75677
} | class ____(_DynamicFixture, _fixtures.FixtureTest):
@testing.combinations(("star",), ("attronly",), argnames="type_")
def test_noload_issue(self, type_, user_address_fixture):
"""test #6420. a noload that hits the dynamic loader
should have no effect.
"""
User, Address = user_address_fixture()
s = fixture_session()
with expect_noload_deprecation():
if type_ == "star":
u1 = s.query(User).filter_by(id=7).options(noload("*")).first()
assert "name" not in u1.__dict__["name"]
elif type_ == "attronly":
u1 = (
s.query(User)
.filter_by(id=7)
.options(noload(User.addresses))
.first()
)
eq_(u1.__dict__["name"], "jack")
# noload doesn't affect a dynamic loader, because it has no state
eq_(list(u1.addresses), [Address(id=1)])
| DynamicTest |
python | tensorflow__tensorflow | tensorflow/python/data/util/traverse_test.py | {
"start": 1739,
"end": 4863
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.graph_only_combinations())
def testOnlySource(self):
ds = dataset_ops.Dataset.range(10)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertAllEqual(["RangeDataset"], [x.name for x in variant_tensor_ops])
@combinations.generate(test_base.graph_only_combinations())
def testSimplePipeline(self):
ds = dataset_ops.Dataset.range(10).map(math_ops.square)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["MapDataset", "RangeDataset"]),
set(x.name for x in variant_tensor_ops))
@combinations.generate(test_base.graph_only_combinations())
def testConcat(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = ds1.concatenate(ds2)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ConcatenateDataset", "RangeDataset", "RangeDataset_1"]),
set(x.name for x in variant_tensor_ops))
@combinations.generate(test_base.graph_only_combinations())
def testZip(self):
ds1 = dataset_ops.Dataset.range(10)
ds2 = dataset_ops.Dataset.range(10)
ds = dataset_ops.Dataset.zip((ds1, ds2))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["ZipDataset", "RangeDataset", "RangeDataset_1"]),
set(x.name for x in variant_tensor_ops))
@combinations.generate(test_base.graph_only_combinations())
def testMultipleVariantTensors(self):
ds = dataset_ops.Dataset.range(10)
ds = _TestDataset(ds)
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds)
self.assertSetEqual(
set(["RangeDataset", "ModelDataset", "PrefetchDataset"]),
set(x.name for x in variant_tensor_ops))
@combinations.generate(test_base.graph_only_combinations())
def testFlatMap(self):
ds1 = dataset_ops.Dataset.range(10).repeat(10)
def map_fn(ds):
def _map(x):
return ds.batch(x)
return _map
ds2 = dataset_ops.Dataset.range(20).prefetch(1)
ds2 = ds2.flat_map(map_fn(ds1))
variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(ds2)
self.assertSetEqual(
set([
"FlatMapDataset", "PrefetchDataset", "RepeatDataset",
"RangeDataset", "RangeDataset_1"
]), set(x.name for x in variant_tensor_ops))
@combinations.generate(test_base.graph_only_combinations())
def testTfDataService(self):
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
data_service_ops.distribute("parallel_epochs", "grpc://foo:0"))
ops = traverse.obtain_capture_by_value_ops(ds)
data_service_dataset_op = ("DataServiceDatasetV4"
if compat.forward_compatible(2022, 8, 31) else
"DataServiceDatasetV3")
self.assertContainsSubset(
["RangeDataset", data_service_dataset_op, "DummyIterationCounter"],
set(x.name for x in ops))
if __name__ == "__main__":
test.main()
| TraverseTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 23554,
"end": 23762
} | class ____(SemiIncrementalMixin, GithubStream):
"""
API docs: https://docs.github.com/en/rest/activity/events?apiVersion=2022-11-28#list-repository-events
"""
cursor_field = "created_at"
| Events |
python | openai__gym | gym/wrappers/clip_action.py | {
"start": 147,
"end": 1155
} | class ____(ActionWrapper):
"""Clip the continuous action within the valid :class:`Box` observation space bound.
Example:
>>> import gym
>>> env = gym.make('Bipedal-Walker-v3')
>>> env = ClipAction(env)
>>> env.action_space
Box(-1.0, 1.0, (4,), float32)
>>> env.step(np.array([5.0, 2.0, -10.0, 0.0]))
# Executes the action np.array([1.0, 1.0, -1.0, 0]) in the base environment
"""
def __init__(self, env: gym.Env):
"""A wrapper for clipping continuous actions within the valid bound.
Args:
env: The environment to apply the wrapper
"""
assert isinstance(env.action_space, Box)
super().__init__(env)
def action(self, action):
"""Clips the action within the valid bounds.
Args:
action: The action to clip
Returns:
The clipped action
"""
return np.clip(action, self.action_space.low, self.action_space.high)
| ClipAction |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_schedule.py | {
"start": 18880,
"end": 23251
} | class ____:
@pytest.fixture
def mock_project_chooser(self) -> Mock:
"""Create a mock ProjectChooser."""
return Mock(spec=ProjectChooser)
def test_chosen_projects_context_manager(self, mock_project_chooser: Mock) -> None:
"""Test chosen_projects as a context manager."""
# Setup mocks
mock_cohort_updates = Mock(spec=CohortUpdates)
mock_buffer_client = Mock(spec=DelayedWorkflowClient)
mock_project_chooser.client = mock_buffer_client
mock_buffer_client.fetch_updates.return_value = mock_cohort_updates
fetch_time = 1000.0
all_project_ids = [1, 2, 3, 4, 5]
expected_result = [1, 2, 3]
mock_project_chooser.project_ids_to_process.return_value = expected_result
# Use context manager
with chosen_projects(mock_project_chooser, fetch_time, all_project_ids) as result:
project_ids_to_process = result
# Verify fetch_updates was called on project_chooser.client
mock_buffer_client.fetch_updates.assert_called_once()
# Verify project_ids_to_process was called with correct args
mock_project_chooser.project_ids_to_process.assert_called_once_with(
fetch_time, mock_cohort_updates, all_project_ids
)
# Verify the result
assert project_ids_to_process == expected_result
# Verify persist_updates was called after context exit
mock_buffer_client.persist_updates.assert_called_once_with(mock_cohort_updates)
def test_chosen_projects_fetch_updates_exception(self, mock_project_chooser: Mock) -> None:
"""Test that exception during fetch_updates is properly handled."""
# Setup mocks
mock_buffer_client = Mock(spec=DelayedWorkflowClient)
mock_project_chooser.client = mock_buffer_client
# Make fetch_updates raise an exception (e.g. key doesn't exist)
mock_buffer_client.fetch_updates.side_effect = Exception("Key not found")
fetch_time = 1000.0
all_project_ids = [1, 2, 3, 4, 5]
# Should raise the exception from fetch_updates
with pytest.raises(Exception, match="Key not found"):
with chosen_projects(mock_project_chooser, fetch_time, all_project_ids):
pass
# persist_updates should not be called if fetch_updates fails
mock_buffer_client.persist_updates.assert_not_called()
def test_chosen_projects_exception_during_processing(self, mock_project_chooser: Mock) -> None:
mock_buffer_client = Mock(spec=DelayedWorkflowClient)
mock_project_chooser.client = mock_buffer_client
mock_buffer_client.fetch_updates.return_value = Mock(spec=CohortUpdates)
mock_project_chooser.project_ids_to_process.return_value = [1, 2, 3]
with pytest.raises(RuntimeError, match="Processing failed"):
with chosen_projects(mock_project_chooser, 1000.0, [1, 2, 3, 4, 5]):
raise RuntimeError("Processing failed")
mock_buffer_client.persist_updates.assert_not_called()
@override_options({"workflow_engine.scheduler.use_conditional_delete": True})
def test_mark_projects_processed_only_cleans_up_processed_projects() -> None:
"""Test that mark_projects_processed only cleans up processed projects, not all projects."""
processed_project_id = 5000
unprocessed_project_id = 5001
current_time = 1000.0
def get_fake_time() -> float:
return current_time
all_project_ids_and_timestamps = {
processed_project_id: [1000.0],
unprocessed_project_id: [2000.0],
}
client = DelayedWorkflowClient(RedisHashSortedSetBuffer(now_fn=get_fake_time))
# Add both projects to buffer
for project_id, [timestamp] in all_project_ids_and_timestamps.items():
current_time = timestamp
client.add_project_ids([project_id])
# Only mark one project as processed
mark_projects_processed(
client,
[processed_project_id], # Only this one was processed
all_project_ids_and_timestamps,
)
# The unprocessed project should still be in buffer
remaining_project_ids = client.get_project_ids(min=0, max=3000.0)
assert unprocessed_project_id in remaining_project_ids
assert processed_project_id not in remaining_project_ids
| TestChosenProjects |
python | sphinx-doc__sphinx | sphinx/transforms/post_transforms/images.py | {
"start": 4388,
"end": 5833
} | class ____(BaseImageConverter):
default_priority = 150
def match(self, node: nodes.image) -> bool:
if self.env._builder_cls.supported_data_uri_images is True:
return False # do not transform the image; data URIs are valid in the build output
return node['uri'].startswith('data:')
def handle(self, node: nodes.image) -> None:
image = parse_data_uri(node['uri'])
assert image is not None
ext = get_image_extension(image.mimetype)
if ext is None:
logger.warning(
__('Unknown image format: %s...'), node['uri'][:32], location=node
)
return
ensuredir(self.imagedir / 'embeded')
digest = sha1(image.data, usedforsecurity=False).hexdigest()
path = self.imagedir / 'embeded' / (digest + ext)
self.env.original_image_uri[path] = node['uri']
with open(path, 'wb') as f:
f.write(image.data)
path_str = str(path)
node['candidates'].pop('?')
node['candidates'][image.mimetype] = path_str
node['uri'] = path_str
self.env.images.add_file(self.env.current_document.docname, path_str)
def get_filename_for(filename: str, mimetype: str) -> str:
basename = os.path.basename(filename)
basename = CRITICAL_PATH_CHAR_RE.sub('_', basename)
return os.path.splitext(basename)[0] + (get_image_extension(mimetype) or '')
| DataURIExtractor |
python | tensorflow__tensorflow | third_party/xla/xla/backends/cpu/codegen/fusion_emitter_test.py | {
"start": 921,
"end": 7614
} | class ____(parameterized.TestCase):
def test_basic_add_sub(self):
dtype = np.dtype(np.float32)
hlo = """
HloModule test_module
fusion_computation {
%param_0 = f32[100, 200] parameter(0)
%param_1 = f32[100, 200] parameter(1)
%param_2 = f32[100, 200] parameter(2)
%add = f32[100, 200] add(%param_0, %param_1)
%sub = f32[100, 200] subtract(%add, %param_2)
ROOT %tuple = (f32[100, 200], f32[100, 200]) tuple(%add, %sub)
}
ENTRY main {
%param_0 = f32[100, 200] parameter(0)
%param_1 = f32[100, 200] parameter(1)
%param_2 = f32[100, 200] parameter(2)
ROOT %wrapped_fusion = (f32[100, 200], f32[100, 200])
fusion(%param_0, %param_1, %param_2),
kind=kLoop, calls=%fusion_computation
}
"""
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
mlir_context = testlib_cpu.MLIRContext()
kernel_definition = testlib_cpu.emit_fusion_kernel(
mlir_context,
hlo_module.get_root_instruction(),
buffer_assignment,
False,
)
kernel_runner = testlib_cpu.KernelRunner.create(
kernel_definition, jit_compiler
)
operand_shape = (100, 200)
param_0 = base_utilities.create_literal_from_np(
np.random.rand(*operand_shape).astype(dtype)
)
param_1 = base_utilities.create_literal_from_np(
np.random.rand(*operand_shape).astype(dtype)
)
param_2 = base_utilities.create_literal_from_np(
np.random.rand(*operand_shape).astype(dtype)
)
result_0 = base_utilities.create_literal_from_np(
np.zeros(operand_shape, dtype)
)
result_1 = base_utilities.create_literal_from_np(
np.zeros(operand_shape, dtype)
)
kernel_runner.call([param_0, param_1, param_2, result_0, result_1])
np.testing.assert_array_almost_equal(
np.asarray(result_0), np.add(param_0, param_1)
)
np.testing.assert_array_almost_equal(
np.asarray(result_1), np.subtract(np.add(param_0, param_1), param_2)
)
def test_convert_f32_bf16_f32(self):
dtype = np.dtype(np.float32)
hlo = """
HloModule test_module
fusion_computation {
%param = f32[300, 500] parameter(0)
%convert_bf16 = bf16[300, 500] convert(%param)
ROOT %convert_f32 = f32[300, 500] convert(%convert_bf16)
}
ENTRY main {
%param = f32[300, 500] parameter(0)
ROOT %wrapped_fusion = f32[300, 500]
fusion(%param),
kind=kLoop, calls=%fusion_computation
}
"""
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
mlir_context = testlib_cpu.MLIRContext()
kernel_definition = testlib_cpu.emit_fusion_kernel(
mlir_context,
hlo_module.get_root_instruction(),
buffer_assignment,
False,
)
kernel_runner = testlib_cpu.KernelRunner.create(
kernel_definition, jit_compiler
)
operand_shape = (300, 500)
# np.rand produces values in the range [0, 1).
# Bias and scale it to get more representative values.
param = base_utilities.create_literal_from_np(
(np.random.rand(*operand_shape).astype(dtype) - 0.5) * 1e9
)
# Also make sure inf is preserved.
np.asarray(param)[0, 0] = np.inf
result = base_utilities.create_literal_from_np(
np.zeros(operand_shape, dtype)
)
kernel_runner.call([param, result])
# Based on the 7-bit fraction of bf16.
bf_16_rtol = 2**-8
np.testing.assert_allclose(result, param, rtol=bf_16_rtol)
# Check that user provided nans and nans produced by internal ops are
# preserved.
def test_convert_f32_bf16_f32_nan(self):
dtype = np.dtype(np.float32)
hlo = """
HloModule test_module
fusion_computation {
%param = f32[3] parameter(0)
%div = f32[3] divide(%param, %param)
%convert_bf16 = bf16[3] convert(%div)
ROOT %convert_f32 = f32[3] convert(%convert_bf16)
}
ENTRY main {
%param = f32[3] parameter(0)
ROOT %wrapped_fusion = f32[3]
fusion(%param),
kind=kLoop, calls=%fusion_computation
}
"""
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
mlir_context = testlib_cpu.MLIRContext()
kernel_definition = testlib_cpu.emit_fusion_kernel(
mlir_context,
hlo_module.get_root_instruction(),
buffer_assignment,
False,
)
kernel_runner = testlib_cpu.KernelRunner.create(
kernel_definition, jit_compiler
)
operand_shape = (3,)
param = base_utilities.create_literal_from_np(
np.ndarray(operand_shape, dtype)
)
np_param = np.asarray(param)
np_param[0] = 1
np_param[1] = 0
np_param[2] = np.nan
result = base_utilities.create_literal_from_np(
np.zeros(operand_shape, dtype)
)
kernel_runner.call([param, result])
np_result = np.asarray(result)
self.assertEqual(np_result[0], 1)
self.assertTrue(np.isnan(np_result[1]))
self.assertTrue(np.isnan(np_result[2]))
# Check that a constant with a layout is respected.
def test_constant_with_layout(self):
dtype = np.dtype(np.float32)
hlo = """
HloModule test_module
fusion_computation {
%constant = f32[2, 2]{0, 1} constant({{0, 1}, {2, 3}})
ROOT %result = f32[2, 2]{1, 0} copy(%constant)
}
ENTRY main {
ROOT %wrapped_fusion =
f32[2, 2] fusion(), kind=kLoop, calls=%fusion_computation
}
"""
hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo)
jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config())
mlir_context = testlib_cpu.MLIRContext()
kernel_definition = testlib_cpu.emit_fusion_kernel(
mlir_context,
hlo_module.get_root_instruction(),
buffer_assignment,
False,
)
kernel_runner = testlib_cpu.KernelRunner.create(
kernel_definition, jit_compiler
)
shape = (2, 2)
result = base_utilities.create_literal_from_np(np.zeros(shape, dtype))
kernel_runner.call([result])
np_result = np.asarray(result)
self.assertEqual(np_result[0, 0], 0)
self.assertEqual(np_result[0, 1], 1)
self.assertEqual(np_result[1, 0], 2)
self.assertEqual(np_result[1, 1], 3)
| LoopFusionTest |
python | joke2k__faker | faker/providers/internet/en_US/__init__.py | {
"start": 46,
"end": 89
} | class ____(InternetProvider):
pass
| Provider |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 15811,
"end": 16922
} | class ____(SphinxLogRecordTranslator):
"""LogRecordTranslator for WARNING level log records."""
LogRecordClass = SphinxWarningLogRecord
def filter(self, record: SphinxWarningLogRecord) -> bool: # type: ignore[override]
ret = super().filter(record)
try:
show_warning_types = self._app.config.show_warning_types
except AttributeError:
# config is not initialized yet (ex. in conf.py)
show_warning_types = False
if show_warning_types:
if log_type := getattr(record, 'type', ''):
if log_subtype := getattr(record, 'subtype', ''):
record.msg += f' [{log_type}.{log_subtype}]'
else:
record.msg += f' [{log_type}]'
return ret
def get_node_location(node: Node) -> str | None:
source, line = get_source_line(node)
if source and line:
return f'{os.path.abspath(source)}:{line}'
if source:
return f'{os.path.abspath(source)}:'
if line:
return f'<unknown>:{line}'
return None
| WarningLogRecordTranslator |
python | pypa__pip | src/pip/_vendor/truststore/_windows.py | {
"start": 1973,
"end": 2381
} | class ____(Structure):
_fields_ = (
("cbSize", DWORD),
("pCertContext", PCERT_CONTEXT),
("TrustStatus", CERT_TRUST_STATUS),
("pRevocationInfo", c_void_p),
("pIssuanceUsage", PCERT_ENHKEY_USAGE),
("pApplicationUsage", PCERT_ENHKEY_USAGE),
("pwszExtendedErrorInfo", LPCWSTR),
)
PCERT_CHAIN_ELEMENT = POINTER(CERT_CHAIN_ELEMENT)
| CERT_CHAIN_ELEMENT |
python | davidhalter__parso | parso/python/pep8.py | {
"start": 33623,
"end": 33779
} | class ____(Rule):
code = 392
message = 'Blank line at end of file'
def is_issue(self, leaf):
return self._newline_count >= 2
| BlankLineAtEnd |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict12.py | {
"start": 266,
"end": 316
} | class ____(TypedDict):
bar: NotRequired[str]
| TD1 |
python | viewflow__viewflow | tests/_cases/test_workflow_undo_handle.py | {
"start": 2945,
"end": 3088
} | class ____(flow.Flow):
start = flow.StartHandle().Next(this.handle)
handle = flow.Handle().Next(this.end)
end = flow.End()
| TestUndoFlow |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 22722,
"end": 23207
} | class ____(CommentedMapView, Set): # type: ignore
__slots__ = ()
@classmethod
def _from_iterable(self, it):
# type: (Any) -> Any
return set(it)
def __contains__(self, key):
# type: (Any) -> Any
return key in self._mapping
def __iter__(self):
# type: () -> Any # yield from self._mapping # not in py27, pypy
# for x in self._mapping._keys():
for x in self._mapping:
yield x
| CommentedMapKeysView |
python | sympy__sympy | sympy/polys/series/base.py | {
"start": 5677,
"end": 8043
} | class ____(
PowerSeriesRingProto[TSeries, Ef], Protocol[TSeries, Ef]
):
"""A protocol for a power series ring over a field."""
def __call__(
self, coeffs: Sequence[Ef | int | tuple[int, int]], prec: int | None = None
) -> TSeries:
"""Return a power series element over a field."""
...
def integrate(self, s: TSeries, /) -> TSeries:
"""Return the integral of a power series."""
...
def sqrt(self, s: TSeries, /) -> TSeries:
"""Return the square root of a power seris."""
...
def log(self, s: TSeries, /) -> TSeries:
"""Return the logarithm of a power series."""
...
def log1p(self, s: TSeries, /) -> TSeries:
"""Return the logarithm of (1 + s) for a power series with constant term."""
...
def exp(self, s: TSeries, /) -> TSeries:
"""Return the exponential of a power series."""
...
def expm1(self, s: TSeries, /) -> TSeries:
"""Return the exponential of a power series minus 1."""
...
def atan(self, s: TSeries, /) -> TSeries:
"""Return the arctangent of a power series."""
...
def atanh(self, s: TSeries, /) -> TSeries:
"""Return the hyperbolic arctangent of a power series."""
...
def asin(self, s: TSeries, /) -> TSeries:
"""Return the arcsine of a power series."""
...
def asinh(self, s: TSeries, /) -> TSeries:
"""Return the hyperbolic arcsine of a power series."""
...
def tan(self, s: TSeries, /) -> TSeries:
"""Return the tangent of a power series."""
...
def tanh(self, s: TSeries, /) -> TSeries:
"""Return the hyperbolic tangent of a power series."""
...
def sin(self, s: TSeries, /) -> TSeries:
"""Return the sine of a power series."""
...
def sinh(self, s: TSeries, /) -> TSeries:
"""Return the hyperbolic sine of a power series."""
...
def cos(self, s: TSeries, /) -> TSeries:
"""Return the cosine of a power series."""
...
def cosh(self, s: TSeries, /) -> TSeries:
"""Return the hyperbolic cosine of a power series."""
...
def hypot(self, s1: TSeries, s2: TSeries, /) -> TSeries:
"""Return the hypotenuse of two power series."""
...
| PowerSeriesRingFieldProto |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 21760,
"end": 22663
} | class ____(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
| PyCodeObjectPtr |
python | wandb__wandb | wandb/automations/_filters/run_metrics.py | {
"start": 3870,
"end": 4931
} | class ____(BaseMetricFilter): # from: RunMetricThresholdFilter
"""Filter that compares an **absolute** metric value against a user-defined threshold.
The value may be a single value or an aggregated result over a window of
multiple values.
"""
name: str
agg: Annotated[Optional[Agg], Field(alias="agg_op")] = None
window: Annotated[PositiveInt, Field(alias="window_size")] = 1
cmp: Annotated[Literal["$gte", "$gt", "$lt", "$lte"], Field(alias="cmp_op")]
"""Comparison operator between the metric value (left) vs. the threshold (right)."""
threshold: Union[StrictInt, StrictFloat]
@field_validator("cmp", mode="before")
def _validate_cmp(cls, v: Any) -> Any:
# Be helpful: e.g. ">" -> "$gt"
return PY2MONGO_OPS.get(v.strip(), v) if isinstance(v, str) else v
def __repr__(self) -> str:
metric = f"{self.agg.value}({self.name})" if self.agg else self.name
op = MONGO2PY_OPS.get(self.cmp, self.cmp)
return repr(rf"{metric} {op} {self.threshold}")
| MetricThresholdFilter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/bundles/gcs.py | {
"start": 1173,
"end": 6283
} | class ____(BaseDagBundle):
"""
GCS Dag bundle - exposes a directory in GCS as a Dag bundle.
This allows Airflow to load Dags directly from a GCS bucket.
:param gcp_conn_id: Airflow connection ID for GCS. Defaults to GoogleBaseHook.default_conn_name.
:param bucket_name: The name of the GCS bucket containing the Dag files.
:param prefix: Optional subdirectory within the GCS bucket where the Dags are stored.
If None, Dags are assumed to be at the root of the bucket (Optional).
"""
supports_versioning = False
def __init__(
self,
*,
gcp_conn_id: str = GoogleBaseHook.default_conn_name,
bucket_name: str,
prefix: str = "",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.bucket_name = bucket_name
self.prefix = prefix
# Local path where GCS Dags are downloaded
self.gcs_dags_dir: Path = self.base_dir
log = structlog.get_logger(__name__)
self._log = log.bind(
bundle_name=self.name,
version=self.version,
bucket_name=self.bucket_name,
prefix=self.prefix,
gcp_conn_id=self.gcp_conn_id,
)
self._gcs_hook: GCSHook | None = None
def _initialize(self):
with self.lock():
if not self.gcs_dags_dir.exists():
self._log.info("Creating local Dags directory: %s", self.gcs_dags_dir)
os.makedirs(self.gcs_dags_dir)
if not self.gcs_dags_dir.is_dir():
raise NotADirectoryError(f"Local Dags path: {self.gcs_dags_dir} is not a directory.")
try:
self.gcs_hook.get_bucket(bucket_name=self.bucket_name)
except NotFound:
raise ValueError(f"GCS bucket '{self.bucket_name}' does not exist.")
if self.prefix:
# don't check when prefix is ""
if not self.gcs_hook.list(bucket_name=self.bucket_name, prefix=self.prefix):
raise ValueError(f"GCS prefix 'gs://{self.bucket_name}/{self.prefix}' does not exist.")
self.refresh()
def initialize(self) -> None:
self._initialize()
super().initialize()
@property
def gcs_hook(self):
if self._gcs_hook is None:
try:
self._gcs_hook: GCSHook = GCSHook(gcp_conn_id=self.gcp_conn_id) # Initialize GCS hook.
except AirflowException as e:
self._log.warning("Could not create GCSHook for connection %s: %s", self.gcp_conn_id, e)
return self._gcs_hook
def __repr__(self):
return (
f"<GCSDagBundle("
f"name={self.name!r}, "
f"bucket_name={self.bucket_name!r}, "
f"prefix={self.prefix!r}, "
f"version={self.version!r}"
f")>"
)
def get_current_version(self) -> str | None:
"""Return the current version of the Dag bundle. Currently not supported."""
return None
@property
def path(self) -> Path:
"""Return the local path to the Dag files."""
return self.gcs_dags_dir # Path where Dags are downloaded.
def refresh(self) -> None:
"""Refresh the Dag bundle by re-downloading the Dags from GCS."""
if self.version:
raise ValueError("Refreshing a specific version is not supported")
with self.lock():
self._log.debug(
"Downloading Dags from gs://%s/%s to %s", self.bucket_name, self.prefix, self.gcs_dags_dir
)
self.gcs_hook.sync_to_local_dir(
bucket_name=self.bucket_name,
prefix=self.prefix,
local_dir=self.gcs_dags_dir,
delete_stale=True,
)
def view_url(self, version: str | None = None) -> str | None:
"""
Return a URL for viewing the Dags in GCS. Currently, versioning is not supported.
This method is deprecated and will be removed when the minimum supported Airflow version is 3.1.
Use `view_url_template` instead.
"""
return self.view_url_template()
def view_url_template(self) -> str | None:
"""Return a URL for viewing the Dags in GCS. Currently, versioning is not supported."""
if self.version:
raise ValueError("GCS url with version is not supported")
if hasattr(self, "_view_url_template") and self._view_url_template:
# Because we use this method in the view_url method, we need to handle
# backward compatibility for Airflow versions that doesn't have the
# _view_url_template attribute. Should be removed when we drop support for Airflow 3.0
return self._view_url_template
# https://console.cloud.google.com/storage/browser/<bucket-name>/<prefix>
url = f"https://console.cloud.google.com/storage/browser/{self.bucket_name}"
if self.prefix:
url += f"/{self.prefix}"
return url
| GCSDagBundle |
python | PrefectHQ__prefect | tests/client/schemas/test_schedules.py | {
"start": 382,
"end": 4270
} | class ____:
def test_construct_interval_schedule(self):
interval = 300 # 5 minutes
result = construct_schedule(interval=interval)
assert isinstance(result, IntervalSchedule)
assert result.interval == datetime.timedelta(seconds=interval)
def test_construct_cron_schedule(self):
cron_string = "0 0 * * *"
result = construct_schedule(cron=cron_string)
assert isinstance(result, CronSchedule)
assert result.cron == cron_string
def test_construct_rrule_schedule(self):
rrule_string = "FREQ=DAILY;COUNT=2"
result = construct_schedule(rrule=rrule_string)
assert isinstance(result, RRuleSchedule)
assert result.rrule == rrule_string
@pytest.mark.parametrize(
"kwargs",
[
{**d1, **d2}
for d1, d2 in combinations(
[
{"interval": 3600},
{"cron": "* * * * *"},
{"rrule": "FREQ=MINUTELY"},
],
2,
)
],
)
def test_multiple_schedules_error(self, kwargs):
with pytest.raises(
ValueError, match="Only one of interval, cron, or rrule can be provided."
):
construct_schedule(**kwargs)
def test_anchor_date_without_interval_error(self):
with pytest.raises(
ValueError,
match="An anchor date can only be provided with an interval schedule",
):
construct_schedule(anchor_date="2023-01-01")
def test_timezone_without_schedule_error(self):
with pytest.raises(
ValueError,
match="A timezone can only be provided with interval, cron, or rrule",
):
construct_schedule(timezone="UTC")
def test_no_schedule_error(self):
with pytest.raises(
ValueError, match="Either interval, cron, or rrule must be provided"
):
construct_schedule()
def test_timedelta_interval_schedule(self):
interval = datetime.timedelta(minutes=5)
result = construct_schedule(interval=interval)
assert isinstance(result, IntervalSchedule)
assert result.interval == interval
def test_datetime_anchor_date(self):
anchor = now()
result = construct_schedule(interval=300, anchor_date=anchor)
assert result == IntervalSchedule(
interval=datetime.timedelta(seconds=300), anchor_date=anchor
)
def test_string_anchor_date(self):
anchor = "2023-01-01T00:00:00+00:00"
result = construct_schedule(interval=300, anchor_date=anchor)
assert result == IntervalSchedule(
interval=datetime.timedelta(seconds=300),
anchor_date=DateTime.fromisoformat(anchor),
)
@pytest.mark.parametrize(
"value",
[
"not even almost a boolean",
"{{ to.be.templated }}",
],
)
def test_invalid_active_value(self, value: str):
with pytest.raises(
ValueError, match="active must be able to be parsed as a boolean"
):
schedule = IntervalSchedule(interval=datetime.timedelta(seconds=300))
DeploymentScheduleCreate(active=value, schedule=schedule)
@pytest.mark.parametrize(
"value,expected",
[
("True", True),
("False", False),
("true", True),
("false", False),
("TRUE", True),
("FALSE", False),
("1", True),
("0", False),
],
)
def test_parsable_active_value(self, value: str, expected: bool):
schedule = IntervalSchedule(interval=datetime.timedelta(seconds=300))
assert (
DeploymentScheduleCreate(active=value, schedule=schedule).active == expected
)
| TestConstructSchedule |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.