language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/tune/tests/test_run_experiment.py | {
"start": 462,
"end": 7994
} | class ____(unittest.TestCase):
def setUp(self):
os.environ["TUNE_STATE_REFRESH_PERIOD"] = "0.1"
register_trainable("f1", train_fn)
def tearDown(self):
ray.shutdown()
def testDict(self):
trials = run_experiments(
{
"foo": {
"run": "f1",
},
"bar": {
"run": "f1",
},
}
)
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperiment(self):
exp1 = Experiment(
**{
"name": "foo",
"run": "f1",
}
)
[trial] = run_experiments(exp1)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperimentList(self):
exp1 = Experiment(
**{
"name": "foo",
"run": "f1",
}
)
exp2 = Experiment(
**{
"name": "bar",
"run": "f1",
}
)
trials = run_experiments([exp1, exp2])
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testAutoregisterTrainable(self):
class B(Trainable):
def step(self):
return {"timesteps_this_iter": 1, "done": True}
trials = run_experiments(
{
"foo": {
"run": train_fn,
},
"bar": {"run": B},
}
)
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCheckpointAtEnd(self):
class MyTrainable(Trainable):
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def save_checkpoint(self, path):
checkpoint = os.path.join(path, "checkpoint")
with open(checkpoint, "w") as f:
f.write("OK")
trials = run_experiments(
{
"foo": {
"run": MyTrainable,
"checkpoint_config": CheckpointConfig(checkpoint_at_end=True),
}
}
)
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.checkpoint)
def testExportFormats(self):
class train_fn(Trainable):
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
path = os.path.join(export_dir, "exported")
with open(path, "w") as f:
f.write("OK")
return {export_formats[0]: path}
trials = run_experiments(
{"foo": {"run": train_fn, "export_formats": ["format"]}}
)
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(
os.path.exists(
os.path.join(trial.storage.trial_working_directory, "exported")
)
)
def testInvalidExportFormats(self):
class MyTrainable(Trainable):
def step(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
ExportFormat.validate(export_formats)
return {}
def fail_trial():
run_experiments({"foo": {"run": MyTrainable, "export_formats": ["format"]}})
self.assertRaises(TuneError, fail_trial)
def testCustomResources(self):
ray.shutdown()
ray.init(resources={"hi": 3})
class MyTrainable(Trainable):
def step(self):
return {"timesteps_this_iter": 1, "done": True}
trials = run_experiments(
{
"foo": {
"run": MyTrainable,
"resources_per_trial": {"cpu": 1, "custom_resources": {"hi": 2}},
}
}
)
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCustomLoggerNoAutoLogging(self):
"""Does not create CSV/JSON logger callbacks automatically"""
os.environ["TUNE_DISABLE_AUTO_CALLBACK_LOGGERS"] = "1"
class CustomLogger(Logger):
def on_result(self, result):
with open(os.path.join(self.logdir, "test.log"), "w") as f:
f.write("hi")
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}},
callbacks=[LegacyLoggerCallback(logger_classes=[CustomLogger])],
)
self.assertTrue(os.path.exists(os.path.join(trial.local_path, "test.log")))
self.assertFalse(os.path.exists(os.path.join(trial.local_path, "params.json")))
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}}
)
self.assertFalse(os.path.exists(os.path.join(trial.local_path, "params.json")))
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}},
callbacks=[LegacyLoggerCallback(logger_classes=[])],
)
self.assertFalse(os.path.exists(os.path.join(trial.local_path, "params.json")))
def testCustomLoggerWithAutoLogging(self):
"""Creates CSV/JSON logger callbacks automatically"""
if "TUNE_DISABLE_AUTO_CALLBACK_LOGGERS" in os.environ:
del os.environ["TUNE_DISABLE_AUTO_CALLBACK_LOGGERS"]
class CustomLogger(Logger):
def on_result(self, result):
with open(os.path.join(self.logdir, "test.log"), "w") as f:
f.write("hi")
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}},
callbacks=[LegacyLoggerCallback(logger_classes=[CustomLogger])],
)
self.assertTrue(os.path.exists(os.path.join(trial.local_path, "test.log")))
self.assertTrue(os.path.exists(os.path.join(trial.local_path, "params.json")))
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}}
)
self.assertTrue(os.path.exists(os.path.join(trial.local_path, "params.json")))
[trial] = run_experiments(
{"foo": {"run": "f1", "stop": {"training_iteration": 1}}},
callbacks=[LegacyLoggerCallback(logger_classes=[])],
)
self.assertTrue(os.path.exists(os.path.join(trial.local_path, "params.json")))
def testCustomTrialString(self):
[trial] = run_experiments(
{
"foo": {
"run": "f1",
"stop": {"training_iteration": 1},
"trial_name_creator": lambda t: "{}_{}_321".format(
t.trainable_name, t.trial_id
),
}
}
)
self.assertEqual(
str(trial), "{}_{}_321".format(trial.trainable_name, trial.trial_id)
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| RunExperimentTest |
python | jina-ai__jina | jina/jaml/parsers/flow/v1.py | {
"start": 595,
"end": 6377
} | class ____(VersionedYAMLParser):
"""V1Parser introduces new syntax and features:
- It has a top-level field ``version``
- ``deployments`` is now a List of Dict (rather than a Dict as prev.)
- ``name`` is now optional
- new field ``method`` can be used to specify how to add this Deployment into the Flow, availables are:
- ``add``: (default) equal to `Flow.add(...)`
- ``needs``: (default) equal to `Flow.needs(...)`
- ``inspect``: (default) equal to `Flow.inspect(...)`
An example V1 YAML config can be found below:
.. highlight:: yaml
.. code-block:: yaml
!Flow
version: '1.0'
deployments:
- name: executor0 # notice the change here, name is now an attribute
method: add # by default method is always add, available: add, needs, inspect
needs: gateway
- name: executor1 # notice the change here, name is now an attribute
method: add # by default method is always add, available: add, needs, inspect
needs: gateway
- method: inspect # add an inspect node on executor1
- method: needs # let's try something new in Flow YAML v1: needs
needs: [executor1, executor0]
"""
version = '1' # the version number this parser designed for
def parse(
self, cls: type, data: Dict, runtime_args: Optional[Dict[str, Any]] = None
) -> 'Flow':
"""
:param cls: the class registered for dumping/loading
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Flow YAML parser given the syntax version number
"""
p = data.get('with', {}) # type: Dict[str, Any]
a = p.pop('args') if 'args' in p else ()
k = p.pop('kwargs') if 'kwargs' in p else {}
# maybe there are some hanging kwargs in "parameters"
tmp_a = (expand_env_var(v) for v in a)
tmp_p = {kk: expand_env_var(vv) for kk, vv in {**k, **p}.items()}
obj = cls(*tmp_a, **tmp_p)
pp = data.get('executors', data.get('deployments', []))
for deployments in pp:
if isinstance(deployments, str):
dep = Deployment.load_config(
deployments,
extra_search_paths=data.get('with', {}).get('extra_search_paths'),
include_gateway=False,
noblock_on_start=True,
)
getattr(obj, 'add')(dep, copy_flow=False)
elif (
isinstance(deployments, dict)
and deployments.get('jtype') == 'Deployment'
):
dep = Deployment.load_config(
deployments,
extra_search_paths=data.get('with', {}).get('extra_search_paths'),
include_gateway=False,
noblock_on_start=True,
)
getattr(obj, 'add')(dep, copy_flow=False)
else:
p_deployment_attr = {
kk: expand_env_var(vv) for kk, vv in deployments.items()
}
# in v1 YAML, flow is an optional argument
if p_deployment_attr.get('name', None) != GATEWAY_NAME:
# ignore gateway when reading, it will be added during build()
method = p_deployment_attr.get('method', 'add')
# support methods: add, needs, inspect
getattr(obj, method)(**p_deployment_attr, copy_flow=False)
gateway = data.get(GATEWAY_NAME, {})
if gateway:
gateway_attr = {kk: expand_env_var(vv) for kk, vv in gateway.items()}
obj.config_gateway(**gateway_attr, copy_flow=False)
return obj
def dump(self, data: 'Flow') -> Dict:
"""
:param data: versioned flow object
:return: the dictionary given a versioned flow object
"""
r = {}
if data._version:
r['version'] = data._version
# to maintain order - version -> with -> executors
r['with'] = {}
if data._kwargs:
r['with'].update(data._kwargs)
if data._common_kwargs:
r['with'].update(data._common_kwargs)
if data._deployment_nodes:
r['executors'] = []
last_name = GATEWAY_NAME
for k, v in data._deployment_nodes.items():
kwargs = {}
# only add "needs" when the value is not the last deployment name
if list(v.needs) != [last_name]:
kwargs = {'needs': list(v.needs)}
# get nondefault kwargs
parser = set_deployment_parser()
non_default_kw = ArgNamespace.get_non_defaults_args(v.args, parser)
kwargs.update(non_default_kw)
for t in _get_taboo(parser):
if t in kwargs:
kwargs.pop(t)
if k != GATEWAY_NAME:
last_name = v.args.name
r['executors'].append(kwargs)
gateway_kwargs = {}
gateway_parser = set_gateway_parser()
non_default_kw = ArgNamespace.get_non_defaults_args(
data.gateway_args, gateway_parser
)
gateway_kwargs.update(non_default_kw)
for t in _get_taboo(gateway_parser):
if t in gateway_kwargs:
gateway_kwargs.pop(t)
if 'JINA_FULL_CLI' in os.environ:
r['with'].update(gateway_kwargs)
if gateway_kwargs:
r[GATEWAY_NAME] = gateway_kwargs
return r
| V1Parser |
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 10070,
"end": 10375
} | class ____(IntegerFieldTests):
model = PositiveBigIntegerModel
documented_range = (0, 9223372036854775807)
rel_db_type_class = (
models.PositiveBigIntegerField
if connection.features.related_fields_match_type
else models.BigIntegerField
)
| PositiveBigIntegerFieldTests |
python | huggingface__transformers | src/transformers/models/wavlm/configuration_wavlm.py | {
"start": 861,
"end": 18588
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate an WavLM
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the WavLM
[microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32):
Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`WavLMModel`]. Vocabulary size of the model. Defines the different tokens
that can be represented by the *inputs_ids* passed to the forward method of [`WavLMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`WavLMForCTC`].
layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more
details.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
feat_extract_activation (`str, `optional`, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://huggingface.co/papers/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Probability of each feature vector along the time axis to be chosen as the start of the vector span to be
masked. Approximately `mask_time_prob * sequence_length // mask_time_length` feature vectors will be masked
along the time axis. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2),:
The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks''
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Probability of each feature vector along the feature axis to be chosen as the start of the vector span to
be masked. Approximately `mask_time_prob * hidden_size // mask_time_length` feature vectors will be masked
along the time axis. This is only relevant if `apply_spec_augment is True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization.
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
diversity_loss_weight (`int`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`WavLMForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`WavLMForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`WavLMForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
add_adapter (`bool`, *optional*, defaults to `False`):
Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for
warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
adapter_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adapter_stride (`int`, *optional*, defaults to 2):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
num_adapter_layers (`int`, *optional*, defaults to 3):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
output_hidden_size (`int`, *optional*):
Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
if `add_adapter is True`.
Example:
```python
```
Example:
```python
>>> from transformers import WavLMConfig, WavLMModel
>>> # Initializing a WavLM facebook/wavlm-base-960h style configuration
>>> configuration = WavLMConfig()
>>> # Initializing a model (with random weights) from the facebook/wavlm-base-960h style configuration
>>> model = WavLMModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "wavlm"
def __init__(
self,
vocab_size=32,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
num_buckets=320,
max_bucket_distance=800,
do_stable_layer_norm=False,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="mean",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
num_ctc_classes=80,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
add_adapter=False,
adapter_kernel_size=3,
adapter_stride=2,
num_adapter_layers=3,
output_hidden_size=None,
**kwargs,
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_buckets = num_buckets
self.max_bucket_distance = max_bucket_distance
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.num_ctc_classes = num_ctc_classes
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
__all__ = ["WavLMConfig"]
| WavLMConfig |
python | coleifer__peewee | tests/regressions.py | {
"start": 39313,
"end": 39457
} | class ____(TestModel):
key = CharField()
cpk = ForeignKeyField(CPK)
class Meta:
primary_key = CompositeKey('key', 'cpk')
| CPKFK |
python | PrefectHQ__prefect | src/prefect/server/schemas/sorting.py | {
"start": 5166,
"end": 5949
} | class ____(AutoEnum):
"""Defines artifact sorting options."""
CREATED_DESC = AutoEnum.auto()
UPDATED_DESC = AutoEnum.auto()
ID_DESC = AutoEnum.auto()
KEY_DESC = AutoEnum.auto()
KEY_ASC = AutoEnum.auto()
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort task runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"CREATED_DESC": [db.Artifact.created.desc()],
"UPDATED_DESC": [db.Artifact.updated.desc()],
"ID_DESC": [db.Artifact.id.desc()],
"KEY_DESC": [db.Artifact.key.desc()],
"KEY_ASC": [db.Artifact.key.asc()],
}
return sort_mapping[self.value]
| ArtifactSort |
python | pytest-dev__pytest | testing/test_subtests.py | {
"start": 20393,
"end": 23237
} | class ____:
def create_file(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import sys
def test(subtests):
print()
print('start test')
with subtests.test(i='A'):
print("hello stdout A")
print("hello stderr A", file=sys.stderr)
assert 0
with subtests.test(i='B'):
print("hello stdout B")
print("hello stderr B", file=sys.stderr)
assert 0
print('end test')
assert 0
"""
)
@pytest.mark.parametrize("mode", ["fd", "sys"])
def test_capturing(self, pytester: pytest.Pytester, mode: str) -> None:
self.create_file(pytester)
result = pytester.runpytest(f"--capture={mode}")
result.stdout.fnmatch_lines(
[
"*__ test (i='A') __*",
"*Captured stdout call*",
"hello stdout A",
"*Captured stderr call*",
"hello stderr A",
"*__ test (i='B') __*",
"*Captured stdout call*",
"hello stdout B",
"*Captured stderr call*",
"hello stderr B",
"*__ test __*",
"*Captured stdout call*",
"start test",
"end test",
]
)
def test_no_capture(self, pytester: pytest.Pytester) -> None:
self.create_file(pytester)
result = pytester.runpytest("-s")
result.stdout.fnmatch_lines(
[
"start test",
"hello stdout A",
"uhello stdout B",
"uend test",
"*__ test (i='A') __*",
"*__ test (i='B') __*",
"*__ test __*",
]
)
result.stderr.fnmatch_lines(["hello stderr A", "hello stderr B"])
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_capture_with_fixture(
self, pytester: pytest.Pytester, fixture: Literal["capsys", "capfd"]
) -> None:
pytester.makepyfile(
rf"""
import sys
def test(subtests, {fixture}):
print('start test')
with subtests.test(i='A'):
print("hello stdout A")
print("hello stderr A", file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'start test\nhello stdout A\n'
assert err == 'hello stderr A\n'
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*1 passed*",
]
)
| TestCapture |
python | numba__llvmlite | llvmlite/tests/test_ir.py | {
"start": 32499,
"end": 84867
} | class ____(TestBase):
"""
Test IR generation of LLVM instructions through the IRBuilder class.
"""
maxDiff = 4000
def test_simple(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
inst = builder.add(a, b, 'res')
self.check_block(block, """\
my_block:
%"res" = add i32 %".1", %".2"
""")
# Instructions should have a useful repr()
self.assertEqual(repr(inst),
"<ir.Instruction 'res' of type 'i32', opname 'add', "
"operands (<ir.Argument '.1' of type i32>, "
"<ir.Argument '.2' of type i32>)>")
def test_binops(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b, ff = builder.function.args[:3]
builder.add(a, b, 'c')
builder.fadd(a, b, 'd')
builder.sub(a, b, 'e')
builder.fsub(a, b, 'f')
builder.mul(a, b, 'g')
builder.fmul(a, b, 'h')
builder.udiv(a, b, 'i')
builder.sdiv(a, b, 'j')
builder.fdiv(a, b, 'k')
builder.urem(a, b, 'l')
builder.srem(a, b, 'm')
builder.frem(a, b, 'n')
builder.or_(a, b, 'o')
builder.and_(a, b, 'p')
builder.xor(a, b, 'q')
builder.shl(a, b, 'r')
builder.ashr(a, b, 's')
builder.lshr(a, b, 't')
with self.assertRaises(ValueError) as cm:
builder.add(a, ff)
self.assertEqual(str(cm.exception),
"Operands must be the same type, got (i32, double)")
self.assertFalse(block.is_terminated)
self.check_block(block, """\
my_block:
%"c" = add i32 %".1", %".2"
%"d" = fadd i32 %".1", %".2"
%"e" = sub i32 %".1", %".2"
%"f" = fsub i32 %".1", %".2"
%"g" = mul i32 %".1", %".2"
%"h" = fmul i32 %".1", %".2"
%"i" = udiv i32 %".1", %".2"
%"j" = sdiv i32 %".1", %".2"
%"k" = fdiv i32 %".1", %".2"
%"l" = urem i32 %".1", %".2"
%"m" = srem i32 %".1", %".2"
%"n" = frem i32 %".1", %".2"
%"o" = or i32 %".1", %".2"
%"p" = and i32 %".1", %".2"
%"q" = xor i32 %".1", %".2"
%"r" = shl i32 %".1", %".2"
%"s" = ashr i32 %".1", %".2"
%"t" = lshr i32 %".1", %".2"
""")
def test_binop_flags(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
# As tuple
builder.add(a, b, 'c', flags=('nuw',))
# and as list
builder.sub(a, b, 'd', flags=['nuw', 'nsw'])
self.check_block(block, """\
my_block:
%"c" = add nuw i32 %".1", %".2"
%"d" = sub nuw nsw i32 %".1", %".2"
""")
def test_binop_fastmath_flags(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
# As tuple
builder.fadd(a, b, 'c', flags=('fast',))
# and as list
builder.fsub(a, b, 'd', flags=['ninf', 'nsz'])
self.check_block(block, """\
my_block:
%"c" = fadd fast i32 %".1", %".2"
%"d" = fsub ninf nsz i32 %".1", %".2"
""")
def test_binops_with_overflow(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a, b = builder.function.args[:2]
builder.sadd_with_overflow(a, b, 'c')
builder.smul_with_overflow(a, b, 'd')
builder.ssub_with_overflow(a, b, 'e')
builder.uadd_with_overflow(a, b, 'f')
builder.umul_with_overflow(a, b, 'g')
builder.usub_with_overflow(a, b, 'h')
self.check_block(block, """\
my_block:
%"c" = call {i32, i1} @"llvm.sadd.with.overflow.i32"(i32 %".1", i32 %".2")
%"d" = call {i32, i1} @"llvm.smul.with.overflow.i32"(i32 %".1", i32 %".2")
%"e" = call {i32, i1} @"llvm.ssub.with.overflow.i32"(i32 %".1", i32 %".2")
%"f" = call {i32, i1} @"llvm.uadd.with.overflow.i32"(i32 %".1", i32 %".2")
%"g" = call {i32, i1} @"llvm.umul.with.overflow.i32"(i32 %".1", i32 %".2")
%"h" = call {i32, i1} @"llvm.usub.with.overflow.i32"(i32 %".1", i32 %".2")
""")
def test_unary_ops(self):
    """neg/not_ expand to sub-from-0 and xor-with-minus-1; fneg is native."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b, c = builder.function.args[:3]
    builder.neg(a, 'd')
    builder.not_(b, 'e')
    builder.fneg(c, 'f')
    # None of these instructions terminate the block.
    self.assertFalse(block.is_terminated)
    self.check_block(block, """\
my_block:
%"d" = sub i32 0, %".1"
%"e" = xor i32 %".2", -1
%"f" = fneg double %".3"
""")
def test_replace_operand(self):
    """replace_usage swaps an instruction operand in place, reflected in
    the textual IR."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    undef1 = ir.Constant(ir.IntType(32), ir.Undefined)
    undef2 = ir.Constant(ir.IntType(32), ir.Undefined)
    c = builder.add(undef1, undef2, 'c')
    self.check_block(block, """\
my_block:
%"c" = add i32 undef, undef
""")
    c.replace_usage(undef1, a)
    c.replace_usage(undef2, b)
    self.check_block(block, """\
my_block:
%"c" = add i32 %".1", %".2"
""")
def test_integer_comparisons(self):
    """icmp_unsigned/icmp_signed map comparison operators to icmp
    predicates; eq/ne are signedness-independent; bad operators raise."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    builder.icmp_unsigned('==', a, b, 'c')
    builder.icmp_unsigned('!=', a, b, 'd')
    builder.icmp_unsigned('<', a, b, 'e')
    builder.icmp_unsigned('<=', a, b, 'f')
    builder.icmp_unsigned('>', a, b, 'g')
    builder.icmp_unsigned('>=', a, b, 'h')
    builder.icmp_signed('==', a, b, 'i')
    builder.icmp_signed('!=', a, b, 'j')
    builder.icmp_signed('<', a, b, 'k')
    builder.icmp_signed('<=', a, b, 'l')
    builder.icmp_signed('>', a, b, 'm')
    builder.icmp_signed('>=', a, b, 'n')
    # 'uno'/'ord' are float predicates; invalid for integer compares.
    with self.assertRaises(ValueError):
        builder.icmp_signed('uno', a, b, 'zz')
    with self.assertRaises(ValueError):
        builder.icmp_signed('foo', a, b, 'zz')
    self.assertFalse(block.is_terminated)
    self.check_block(block, """\
my_block:
%"c" = icmp eq i32 %".1", %".2"
%"d" = icmp ne i32 %".1", %".2"
%"e" = icmp ult i32 %".1", %".2"
%"f" = icmp ule i32 %".1", %".2"
%"g" = icmp ugt i32 %".1", %".2"
%"h" = icmp uge i32 %".1", %".2"
%"i" = icmp eq i32 %".1", %".2"
%"j" = icmp ne i32 %".1", %".2"
%"k" = icmp slt i32 %".1", %".2"
%"l" = icmp sle i32 %".1", %".2"
%"m" = icmp sgt i32 %".1", %".2"
%"n" = icmp sge i32 %".1", %".2"
""")
def test_float_comparisons(self):
    """fcmp_ordered/fcmp_unordered map operators to o*/u* predicates and
    accept fast-math flags."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    builder.fcmp_ordered('==', a, b, 'c')
    builder.fcmp_ordered('!=', a, b, 'd')
    builder.fcmp_ordered('<', a, b, 'e')
    builder.fcmp_ordered('<=', a, b, 'f')
    builder.fcmp_ordered('>', a, b, 'g')
    builder.fcmp_ordered('>=', a, b, 'h')
    builder.fcmp_unordered('==', a, b, 'i')
    builder.fcmp_unordered('!=', a, b, 'j')
    builder.fcmp_unordered('<', a, b, 'k')
    builder.fcmp_unordered('<=', a, b, 'l')
    builder.fcmp_unordered('>', a, b, 'm')
    builder.fcmp_unordered('>=', a, b, 'n')
    # fcmp_ordered and fcmp_unordered are the same for these cases
    builder.fcmp_ordered('ord', a, b, 'u')
    builder.fcmp_ordered('uno', a, b, 'v')
    builder.fcmp_unordered('ord', a, b, 'w')
    builder.fcmp_unordered('uno', a, b, 'x')
    builder.fcmp_unordered('olt', a, b, 'y',
                           flags=['nnan', 'ninf', 'nsz', 'arcp', 'fast'])
    self.assertFalse(block.is_terminated)
    self.check_block(block, """\
my_block:
%"c" = fcmp oeq i32 %".1", %".2"
%"d" = fcmp one i32 %".1", %".2"
%"e" = fcmp olt i32 %".1", %".2"
%"f" = fcmp ole i32 %".1", %".2"
%"g" = fcmp ogt i32 %".1", %".2"
%"h" = fcmp oge i32 %".1", %".2"
%"i" = fcmp ueq i32 %".1", %".2"
%"j" = fcmp une i32 %".1", %".2"
%"k" = fcmp ult i32 %".1", %".2"
%"l" = fcmp ule i32 %".1", %".2"
%"m" = fcmp ugt i32 %".1", %".2"
%"n" = fcmp uge i32 %".1", %".2"
%"u" = fcmp ord i32 %".1", %".2"
%"v" = fcmp uno i32 %".1", %".2"
%"w" = fcmp ord i32 %".1", %".2"
%"x" = fcmp uno i32 %".1", %".2"
%"y" = fcmp nnan ninf nsz arcp fast olt i32 %".1", %".2"
""")
def test_misc_ops(self):
    """select takes fast-math flags; unreachable terminates the block."""
    block = self.block(name='my_block')
    t = ir.Constant(int1, True)
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    builder.select(t, a, b, 'c', flags=('arcp', 'nnan'))
    self.assertFalse(block.is_terminated)
    builder.unreachable()
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
my_block:
%"c" = select arcp nnan i1 true, i32 %".1", i32 %".2"
unreachable
""")
def test_phi(self):
    """phi nodes accumulate incoming (value, block) pairs and accept
    fast-math flags."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    bb2 = builder.function.append_basic_block('b2')
    bb3 = builder.function.append_basic_block('b3')
    phi = builder.phi(int32, 'my_phi', flags=('fast',))
    phi.add_incoming(a, bb2)
    phi.add_incoming(b, bb3)
    self.assertFalse(block.is_terminated)
    self.check_block(block, """\
my_block:
%"my_phi" = phi fast i32 [%".1", %"b2"], [%".2", %"b3"]
""")
def test_mem_ops(self):
    """alloca/store/load (plain, aligned and atomic) plus the type checks
    on store/load arguments.

    The expected textual IR branches on ir_layer_typed_pointers_enabled
    (a module-level flag, not visible here): opaque pointers print as
    ``ptr``, typed pointers as e.g. ``i32*``.
    """
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b, z = builder.function.args[:3]
    c = builder.alloca(int32, name='c')
    d = builder.alloca(int32, size=42, name='d')  # noqa F841
    e = builder.alloca(dbl, size=a, name='e')
    e.align = 8
    self.assertEqual(e.type, ir.PointerType(dbl))
    # store/load return types: store is void, load yields the pointee type.
    ee = builder.store(z, e)
    self.assertEqual(ee.type, ir.VoidType())
    f = builder.store(b, c)
    self.assertEqual(f.type, ir.VoidType())
    g = builder.load(c, 'g')
    self.assertEqual(g.type, int32)
    # With alignment
    h = builder.store(b, c, align=1)
    self.assertEqual(h.type, ir.VoidType())
    i = builder.load(c, 'i', align=1)
    self.assertEqual(i.type, int32)
    # Atomics
    j = builder.store_atomic(b, c, ordering="seq_cst", align=4)
    self.assertEqual(j.type, ir.VoidType())
    k = builder.load_atomic(c, ordering="seq_cst", align=4, name='k')
    self.assertEqual(k.type, int32)
    # Storing through a null-constant pointer is still valid IR.
    if not ir_layer_typed_pointers_enabled:
        ptr = ir.Constant(ir.PointerType(), None)
    else:
        ptr = ir.Constant(ir.PointerType(int32), None)
    builder.store(ir.Constant(int32, 5), ptr)
    # Not pointer types
    with self.assertRaises(TypeError):
        builder.store(b, a)
    with self.assertRaises(TypeError):
        builder.load(b)
    # Mismatching pointer type
    with self.assertRaises(TypeError) as cm:
        builder.store(b, e)
    if not ir_layer_typed_pointers_enabled:
        self.assertEqual(str(cm.exception),
                         "cannot store i32 to ptr: mismatching types")
        self.check_block(block, """\
my_block:
%"c" = alloca i32
%"d" = alloca i32, i32 42
%"e" = alloca double, i32 %".1", align 8
store double %".3", ptr %"e"
store i32 %".2", ptr %"c"
%"g" = load i32, ptr %"c"
store i32 %".2", ptr %"c", align 1
%"i" = load i32, ptr %"c", align 1
store atomic i32 %".2", ptr %"c" seq_cst, align 4
%"k" = load atomic i32, ptr %"c" seq_cst, align 4
store i32 5, ptr null
""")
    else:
        self.assertEqual(str(cm.exception),
                         "cannot store i32 to double*: mismatching types")
        self.check_block(block, """\
my_block:
%"c" = alloca i32
%"d" = alloca i32, i32 42
%"e" = alloca double, i32 %".1", align 8
store double %".3", double* %"e"
store i32 %".2", i32* %"c"
%"g" = load i32, i32* %"c"
store i32 %".2", i32* %"c", align 1
%"i" = load i32, i32* %"c", align 1
store atomic i32 %".2", i32* %"c" seq_cst, align 4
%"k" = load atomic i32, i32* %"c" seq_cst, align 4
store i32 5, i32* null
""")
def test_gep(self):
    """getelementptr on an alloca'd pointer-to-pointer; result type is
    the pointee's element pointer (textual form depends on the
    typed-pointers flag)."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    c = builder.alloca(ir.PointerType(int32), name='c')
    d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
    self.assertEqual(d.type, ir.PointerType(int32))
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"c" = alloca ptr
%"d" = getelementptr ptr, ptr %"c", i32 5, i32 %".1"
""")
    else:
        self.check_block(block, """\
my_block:
%"c" = alloca i32*
%"d" = getelementptr i32*, i32** %"c", i32 5, i32 %".1"
""")
    # XXX test with more complex types
def test_gep_castinstr(self):
    """GEP into a literal struct reached through a bitcast instruction.

    similar to: numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
    """
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    int8ptr = int8.as_pointer()
    ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
    d = builder.bitcast(a, ls.as_pointer(), name='d')
    # Index (0, 3) selects the fourth field (an i8*) of the struct.
    e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
    self.assertEqual(e.type, ir.PointerType(int8ptr))
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"d" = bitcast i32 %".1" to ptr
%"e" = getelementptr {i64, ptr, ptr, ptr, i64}, ptr %"d", i32 0, i32 3
""")  # noqa E501
    else:
        self.check_block(block, """\
my_block:
%"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64}*
%"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64}* %"d", i32 0, i32 3
""")  # noqa E501
def test_gep_castinstr_addrspace(self):
    """Same as test_gep_castinstr but through a non-default address space;
    the GEP result keeps the source pointer's addrspace.

    similar to: numba::runtime::nrtdynmod.py_define_nrt_meminfo_data()
    """
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    addrspace = 4
    int8ptr = int8.as_pointer()
    ls = ir.LiteralStructType([int64, int8ptr, int8ptr, int8ptr, int64])
    d = builder.bitcast(a, ls.as_pointer(addrspace=addrspace), name='d')
    e = builder.gep(d, [ir.Constant(int32, x) for x in [0, 3]], name='e')
    self.assertEqual(e.type.addrspace, addrspace)
    self.assertEqual(e.type, ir.PointerType(int8ptr, addrspace=addrspace))
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"d" = bitcast i32 %".1" to ptr addrspace(4)
%"e" = getelementptr {i64, ptr, ptr, ptr, i64}, ptr addrspace(4) %"d", i32 0, i32 3
""")  # noqa E501
    else:
        self.check_block(block, """\
my_block:
%"d" = bitcast i32 %".1" to {i64, i8*, i8*, i8*, i64} addrspace(4)*
%"e" = getelementptr {i64, i8*, i8*, i8*, i64}, {i64, i8*, i8*, i8*, i64} addrspace(4)* %"d", i32 0, i32 3
""")  # noqa E501
def test_gep_addrspace(self):
    """GEP propagates the address space of the pointer it indexes; the
    alloca itself lives in the default addrspace."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    addrspace = 4
    c = builder.alloca(ir.PointerType(int32, addrspace=addrspace), name='c')
    if not ir_layer_typed_pointers_enabled:
        self.assertEqual(str(c.type), 'ptr')
    else:
        self.assertEqual(str(c.type), 'i32 addrspace(4)**')
        self.assertEqual(c.type.pointee.addrspace, addrspace)
    d = builder.gep(c, [ir.Constant(int32, 5), a], name='d')
    self.assertEqual(d.type.addrspace, addrspace)
    e = builder.gep(d, [ir.Constant(int32, 10)], name='e')
    self.assertEqual(e.type.addrspace, addrspace)
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"c" = alloca ptr addrspace(4)
%"d" = getelementptr ptr addrspace(4), ptr %"c", i32 5, i32 %".1"
%"e" = getelementptr i32, ptr addrspace(4) %"d", i32 10
""")  # noqa E501
    else:
        self.check_block(block, """\
my_block:
%"c" = alloca i32 addrspace(4)*
%"d" = getelementptr i32 addrspace(4)*, i32 addrspace(4)** %"c", i32 5, i32 %".1"
%"e" = getelementptr i32, i32 addrspace(4)* %"d", i32 10
""")  # noqa E501
def test_extract_insert_value(self):
    """extractvalue/insertvalue on flat and nested literal structs,
    including multi-level indices and the TypeError cases (non-aggregate,
    too-deep or out-of-bounds index, mismatched replacement type)."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    tp_inner = ir.LiteralStructType([int32, int1])
    tp_outer = ir.LiteralStructType([int8, tp_inner])
    c_inner = ir.Constant(tp_inner, (ir.Constant(int32, 4),
                                     ir.Constant(int1, True)))
    # Flat structure
    c = builder.extract_value(c_inner, 0, name='c')  # noqa F841
    d = builder.insert_value(c_inner, a, 0, name='d')  # noqa F841
    e = builder.insert_value(d, ir.Constant(int1, False), 1, name='e')  # noqa F841 E501
    self.assertEqual(d.type, tp_inner)
    self.assertEqual(e.type, tp_inner)
    # Nested structure
    p_outer = builder.alloca(tp_outer, name='ptr')
    j = builder.load(p_outer, name='j')
    k = builder.extract_value(j, 0, name='k')
    l = builder.extract_value(j, 1, name='l')
    m = builder.extract_value(j, (1, 0), name='m')
    n = builder.extract_value(j, (1, 1), name='n')
    o = builder.insert_value(j, l, 1, name='o')
    p = builder.insert_value(j, a, (1, 0), name='p')
    self.assertEqual(k.type, int8)
    self.assertEqual(l.type, tp_inner)
    self.assertEqual(m.type, int32)
    self.assertEqual(n.type, int1)
    self.assertEqual(o.type, tp_outer)
    self.assertEqual(p.type, tp_outer)
    with self.assertRaises(TypeError):
        # Not an aggregate
        builder.extract_value(p_outer, 0)
    with self.assertRaises(TypeError):
        # Indexing too deep
        builder.extract_value(c_inner, (0, 0))
    with self.assertRaises(TypeError):
        # Index out of structure bounds
        builder.extract_value(c_inner, 5)
    with self.assertRaises(TypeError):
        # Not an aggregate
        builder.insert_value(a, b, 0)
    with self.assertRaises(TypeError):
        # Replacement value has the wrong type
        builder.insert_value(c_inner, a, 1)
    # Only the load's pointer operand differs between the two pointer modes.
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"c" = extractvalue {i32, i1} {i32 4, i1 true}, 0
%"d" = insertvalue {i32, i1} {i32 4, i1 true}, i32 %".1", 0
%"e" = insertvalue {i32, i1} %"d", i1 false, 1
%"ptr" = alloca {i8, {i32, i1}}
%"j" = load {i8, {i32, i1}}, ptr %"ptr"
%"k" = extractvalue {i8, {i32, i1}} %"j", 0
%"l" = extractvalue {i8, {i32, i1}} %"j", 1
%"m" = extractvalue {i8, {i32, i1}} %"j", 1, 0
%"n" = extractvalue {i8, {i32, i1}} %"j", 1, 1
%"o" = insertvalue {i8, {i32, i1}} %"j", {i32, i1} %"l", 1
%"p" = insertvalue {i8, {i32, i1}} %"j", i32 %".1", 1, 0
""")
    else:
        self.check_block(block, """\
my_block:
%"c" = extractvalue {i32, i1} {i32 4, i1 true}, 0
%"d" = insertvalue {i32, i1} {i32 4, i1 true}, i32 %".1", 0
%"e" = insertvalue {i32, i1} %"d", i1 false, 1
%"ptr" = alloca {i8, {i32, i1}}
%"j" = load {i8, {i32, i1}}, {i8, {i32, i1}}* %"ptr"
%"k" = extractvalue {i8, {i32, i1}} %"j", 0
%"l" = extractvalue {i8, {i32, i1}} %"j", 1
%"m" = extractvalue {i8, {i32, i1}} %"j", 1, 0
%"n" = extractvalue {i8, {i32, i1}} %"j", 1, 1
%"o" = insertvalue {i8, {i32, i1}} %"j", {i32, i1} %"l", 1
%"p" = insertvalue {i8, {i32, i1}} %"j", i32 %".1", 1, 0
""")
def test_cast_ops(self):
    """One of each cast instruction: trunc/zext/sext, fptrunc/fpext,
    fp<->int conversions, ptrtoint/inttoptr, bitcast."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b, fa, ptr = builder.function.args[:4]
    c = builder.trunc(a, int8, name='c')
    d = builder.zext(c, int32, name='d')  # noqa F841
    e = builder.sext(c, int32, name='e')  # noqa F841
    fb = builder.fptrunc(fa, flt, 'fb')
    fc = builder.fpext(fb, dbl, 'fc')  # noqa F841
    g = builder.fptoui(fa, int32, 'g')
    h = builder.fptosi(fa, int8, 'h')
    fd = builder.uitofp(g, flt, 'fd')  # noqa F841
    fe = builder.sitofp(h, dbl, 'fe')  # noqa F841
    i = builder.ptrtoint(ptr, int32, 'i')
    j = builder.inttoptr(i, ir.PointerType(int8), 'j')  # noqa F841
    k = builder.bitcast(a, flt, "k")  # noqa F841
    self.assertFalse(block.is_terminated)
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"c" = trunc i32 %".1" to i8
%"d" = zext i8 %"c" to i32
%"e" = sext i8 %"c" to i32
%"fb" = fptrunc double %".3" to float
%"fc" = fpext float %"fb" to double
%"g" = fptoui double %".3" to i32
%"h" = fptosi double %".3" to i8
%"fd" = uitofp i32 %"g" to float
%"fe" = sitofp i8 %"h" to double
%"i" = ptrtoint ptr %".4" to i32
%"j" = inttoptr i32 %"i" to ptr
%"k" = bitcast i32 %".1" to float
""")
    else:
        self.check_block(block, """\
my_block:
%"c" = trunc i32 %".1" to i8
%"d" = zext i8 %"c" to i32
%"e" = sext i8 %"c" to i32
%"fb" = fptrunc double %".3" to float
%"fc" = fpext float %"fb" to double
%"g" = fptoui double %".3" to i32
%"h" = fptosi double %".3" to i8
%"fd" = uitofp i32 %"g" to float
%"fe" = sitofp i8 %"h" to double
%"i" = ptrtoint i32* %".4" to i32
%"j" = inttoptr i32 %"i" to i8*
%"k" = bitcast i32 %".1" to float
""")
def test_atomicrmw(self):
    """atomic_rmw emits atomicrmw and yields the stored operand's type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    c = builder.alloca(int32, name='c')
    d = builder.atomic_rmw('add', c, a, 'monotonic', 'd')
    self.assertEqual(d.type, int32)
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"c" = alloca i32
%"d" = atomicrmw add ptr %"c", i32 %".1" monotonic
""")
    else:
        self.check_block(block, """\
my_block:
%"c" = alloca i32
%"d" = atomicrmw add i32* %"c", i32 %".1" monotonic
""")
def test_branch(self):
    """An unconditional branch terminates its block."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    dest = bld.function.append_basic_block(name='target')
    bld.branch(dest)
    self.assertTrue(blk.is_terminated)
    self.check_block(blk, """\
my_block:
br label %"target"
""")
def test_cbranch(self):
    """A conditional branch on an i1 constant terminates its block."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    taken = bld.function.append_basic_block(name='b_true')
    fallthru = bld.function.append_basic_block(name='b_false')
    bld.cbranch(ir.Constant(int1, False), taken, fallthru)
    self.assertTrue(blk.is_terminated)
    self.check_block(blk, """\
my_block:
br i1 false, label %"b_true", label %"b_false"
""")
def test_cbranch_weights(self):
    """set_weights attaches !prof branch-weights metadata to a cbranch."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    taken = bld.function.append_basic_block(name='b_true')
    fallthru = bld.function.append_basic_block(name='b_false')
    branch_inst = bld.cbranch(ir.Constant(int1, False), taken, fallthru)
    branch_inst.set_weights([5, 42])
    self.assertTrue(blk.is_terminated)
    self.check_block(blk, """\
my_block:
br i1 false, label %"b_true", label %"b_false", !prof !0
""")
    self.check_metadata(bld.module, """\
!0 = !{ !"branch_weights", i32 5, i32 42 }
""")
def test_branch_indirect(self):
    """indirectbr through a blockaddress constant, with explicitly
    registered possible destinations."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    bb_1 = builder.function.append_basic_block(name='b_1')
    bb_2 = builder.function.append_basic_block(name='b_2')
    indirectbr = builder.branch_indirect(
        ir.BlockAddress(builder.function, bb_1))
    indirectbr.add_destination(bb_1)
    indirectbr.add_destination(bb_2)
    self.assertTrue(block.is_terminated)
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
indirectbr ptr blockaddress(@"my_func", %"b_1"), [label %"b_1", label %"b_2"]
""")  # noqa E501
    else:
        self.check_block(block, """\
my_block:
indirectbr i8* blockaddress(@"my_func", %"b_1"), [label %"b_1", label %"b_2"]
""")  # noqa E501
def test_returns(self):
    """ret void and ret <value>, plain and with attached !dbg metadata."""
    def check(block, expected_ir):
        # Every return must terminate its block.
        self.assertTrue(block.is_terminated)
        self.check_block(block, expected_ir)
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    builder.ret_void()
    check(block, """\
my_block:
ret void
""")
    block = self.block(name='other_block')
    builder = ir.IRBuilder(block)
    builder.ret(int32(5))
    check(block, """\
other_block:
ret i32 5
""")
    # With metadata
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    inst = builder.ret_void()
    inst.set_metadata("dbg", block.module.add_metadata(()))
    check(block, """\
my_block:
ret void, !dbg !0
""")
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    inst = builder.ret(int32(6))
    inst.set_metadata("dbg", block.module.add_metadata(()))
    check(block, """\
my_block:
ret i32 6, !dbg !0
""")
def test_switch(self):
    """switch with a default block and several cases; plain Python values
    are converted to IR constants automatically."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    bb_onzero = builder.function.append_basic_block(name='onzero')
    bb_onone = builder.function.append_basic_block(name='onone')
    bb_ontwo = builder.function.append_basic_block(name='ontwo')
    bb_else = builder.function.append_basic_block(name='otherwise')
    sw = builder.switch(a, bb_else)
    sw.add_case(ir.Constant(int32, 0), bb_onzero)
    sw.add_case(ir.Constant(int32, 1), bb_onone)
    # A plain Python value gets converted into the right IR constant
    sw.add_case(2, bb_ontwo)
    self.assertTrue(block.is_terminated)
    self.check_block(block, """\
my_block:
switch i32 %".1", label %"otherwise" [i32 0, label %"onzero" i32 1, label %"onone" i32 2, label %"ontwo"]
""")  # noqa E501
def test_call(self):
    """call with varargs callees, calling conventions, per-call attributes
    (single and multiple) and fast-math flags."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    tp_f = ir.FunctionType(flt, (int32, int32))
    tp_g = ir.FunctionType(dbl, (int32,), var_arg=True)
    tp_h = ir.FunctionType(hlf, (int32, int32))
    f = ir.Function(builder.function.module, tp_f, 'f')
    g = ir.Function(builder.function.module, tp_g, 'g')
    h = ir.Function(builder.function.module, tp_h, 'h')
    builder.call(f, (a, b), 'res_f')
    builder.call(g, (b, a), 'res_g')
    builder.call(h, (a, b), 'res_h')
    builder.call(f, (a, b), 'res_f_fast', cconv='fastcc')
    # Attributes can also be added on the returned instruction.
    res_f_readonly = builder.call(f, (a, b), 'res_f_readonly')
    res_f_readonly.attributes.add('readonly')
    builder.call(f, (a, b), 'res_fast', fastmath='fast')
    builder.call(f, (a, b), 'res_nnan_ninf', fastmath=('nnan', 'ninf'))
    builder.call(f, (a, b), 'res_noinline', attrs='noinline')
    builder.call(f, (a, b), 'res_alwaysinline', attrs='alwaysinline')
    builder.call(f, (a, b), 'res_noinline_ro', attrs=('noinline',
                                                      'readonly'))
    builder.call(f, (a, b), 'res_convergent', attrs='convergent')
    self.check_block(block, """\
my_block:
%"res_f" = call float @"f"(i32 %".1", i32 %".2")
%"res_g" = call double (i32, ...) @"g"(i32 %".2", i32 %".1")
%"res_h" = call half @"h"(i32 %".1", i32 %".2")
%"res_f_fast" = call fastcc float @"f"(i32 %".1", i32 %".2")
%"res_f_readonly" = call float @"f"(i32 %".1", i32 %".2") readonly
%"res_fast" = call fast float @"f"(i32 %".1", i32 %".2")
%"res_nnan_ninf" = call ninf nnan float @"f"(i32 %".1", i32 %".2")
%"res_noinline" = call float @"f"(i32 %".1", i32 %".2") noinline
%"res_alwaysinline" = call float @"f"(i32 %".1", i32 %".2") alwaysinline
%"res_noinline_ro" = call float @"f"(i32 %".1", i32 %".2") noinline readonly
%"res_convergent" = call float @"f"(i32 %".1", i32 %".2") convergent
""")  # noqa E501
def test_call_metadata(self):
    """
    Function calls with metadata arguments (llvm.dbg.declare): non-metadata
    values such as the alloca are wrapped as 'metadata <ty> <val>'.
    """
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    dbg_declare_ty = ir.FunctionType(ir.VoidType(), [ir.MetaDataType()] * 3)
    dbg_declare = ir.Function(
        builder.module,
        dbg_declare_ty,
        'llvm.dbg.declare')
    a = builder.alloca(int32, name="a")
    b = builder.module.add_metadata(())
    builder.call(dbg_declare, (a, b, b))
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"a" = alloca i32
call void @"llvm.dbg.declare"(metadata ptr %"a", metadata !0, metadata !0)
""")  # noqa E501
    else:
        self.check_block(block, """\
my_block:
%"a" = alloca i32
call void @"llvm.dbg.declare"(metadata i32* %"a", metadata !0, metadata !0)
""")  # noqa E501
def test_call_attributes(self):
    """Per-argument attributes passed via arg_attrs, keyed by argument
    index; checked with a regex because sret may or may not carry an
    explicit type depending on the LLVM version."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    fun_ty = ir.FunctionType(
        ir.VoidType(), (int32.as_pointer(), int32, int32.as_pointer()))
    fun = ir.Function(builder.function.module, fun_ty, 'fun')
    fun.args[0].add_attribute('sret')
    retval = builder.alloca(int32, name='retval')
    other = builder.alloca(int32, name='other')
    builder.call(
        fun,
        (retval, ir.Constant(int32, 42), other),
        arg_attrs={
            0: ('sret', 'noalias'),
            2: 'noalias'
        }
    )
    if not ir_layer_typed_pointers_enabled:
        self.check_block_regex(block, """\
my_block:
%"retval" = alloca i32
%"other" = alloca i32
call void @"fun"\\(ptr noalias sret(\\(i32\\))? %"retval", i32 42, ptr noalias %"other"\\)
""")  # noqa E501
    else:
        self.check_block_regex(block, """\
my_block:
%"retval" = alloca i32
%"other" = alloca i32
call void @"fun"\\(i32\\* noalias sret(\\(i32\\))? %"retval", i32 42, i32\\* noalias %"other"\\)
""")  # noqa E501
def test_call_tail(self):
    """The tail= argument: False/falsy emit no marker, True/truthy emit
    'tail', and the literal strings 'tail'/'notail'/'musttail' are
    emitted verbatim."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    fun_ty = ir.FunctionType(ir.VoidType(), ())
    fun = ir.Function(builder.function.module, fun_ty, 'my_fun')
    builder.call(fun, ())
    builder.call(fun, (), tail=False)
    builder.call(fun, (), tail=True)
    builder.call(fun, (), tail='tail')
    builder.call(fun, (), tail='notail')
    builder.call(fun, (), tail='musttail')
    builder.call(fun, (), tail=[])  # This is a falsy value
    builder.call(fun, (), tail='not a marker')  # This is a truthy value
    self.check_block(block, """\
my_block:
call void @"my_fun"()
call void @"my_fun"()
tail call void @"my_fun"()
tail call void @"my_fun"()
notail call void @"my_fun"()
musttail call void @"my_fun"()
call void @"my_fun"()
tail call void @"my_fun"()
""")  # noqa E501
def test_invalid_call_attributes(self):
    """arg_attrs keyed by a nonexistent argument index is rejected."""
    bld = ir.IRBuilder(self.block())
    fnty = ir.FunctionType(ir.VoidType(), ())
    callee = ir.Function(bld.function.module, fnty, 'fun')
    # The function has no arguments, so this should fail.
    with self.assertRaises(ValueError):
        bld.call(callee, (), arg_attrs={0: 'sret'})
def test_invoke(self):
    """invoke with normal and unwind destination blocks."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    tp_f = ir.FunctionType(flt, (int32, int32))
    f = ir.Function(builder.function.module, tp_f, 'f')
    bb_normal = builder.function.append_basic_block(name='normal')
    bb_unwind = builder.function.append_basic_block(name='unwind')
    builder.invoke(f, (a, b), bb_normal, bb_unwind, 'res_f')
    self.check_block(block, """\
my_block:
%"res_f" = invoke float @"f"(i32 %".1", i32 %".2")
to label %"normal" unwind label %"unwind"
""")
def test_invoke_attributes(self):
    """invoke with calling convention, fast-math flags, function attrs
    and per-argument attrs all combined; regex-checked because sret may
    or may not carry an explicit type."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    fun_ty = ir.FunctionType(
        ir.VoidType(), (int32.as_pointer(), int32, int32.as_pointer()))
    fun = ir.Function(builder.function.module, fun_ty, 'fun')
    fun.calling_convention = "fastcc"
    fun.args[0].add_attribute('sret')
    retval = builder.alloca(int32, name='retval')
    other = builder.alloca(int32, name='other')
    bb_normal = builder.function.append_basic_block(name='normal')
    bb_unwind = builder.function.append_basic_block(name='unwind')
    builder.invoke(
        fun,
        (retval, ir.Constant(int32, 42), other),
        bb_normal,
        bb_unwind,
        cconv='fastcc',
        fastmath='fast',
        attrs='noinline',
        arg_attrs={
            0: ('sret', 'noalias'),
            2: 'noalias'
        }
    )
    if not ir_layer_typed_pointers_enabled:
        self.check_block_regex(block, """\
my_block:
%"retval" = alloca i32
%"other" = alloca i32
invoke fast fastcc void @"fun"\\(ptr noalias sret(\\(i32\\))? %"retval", i32 42, ptr noalias %"other"\\) noinline
to label %"normal" unwind label %"unwind"
""")  # noqa E501
    else:
        self.check_block_regex(block, """\
my_block:
%"retval" = alloca i32
%"other" = alloca i32
invoke fast fastcc void @"fun"\\(i32\\* noalias sret(\\(i32\\))? %"retval", i32 42, i32\\* noalias %"other"\\) noinline
to label %"normal" unwind label %"unwind"
""")  # noqa E501
def test_landingpad(self):
    """landingpad with one catch clause and one filter clause, then
    resume to re-raise."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    lp = builder.landingpad(ir.LiteralStructType([int32,
                                                  int8.as_pointer()]), 'lp')
    # _ZTIi is the Itanium-ABI mangled name of the typeinfo for 'int'.
    int_typeinfo = ir.GlobalVariable(builder.function.module,
                                     int8.as_pointer(), "_ZTIi")
    int_typeinfo.global_constant = True
    lp.add_clause(ir.CatchClause(int_typeinfo))
    lp.add_clause(ir.FilterClause(ir.Constant(ir.ArrayType(
        int_typeinfo.type, 1), [int_typeinfo])))
    builder.resume(lp)
    if not ir_layer_typed_pointers_enabled:
        self.check_block(block, """\
my_block:
%"lp" = landingpad {i32, ptr}
catch ptr @"_ZTIi"
filter [1 x ptr] [ptr @"_ZTIi"]
resume {i32, ptr} %"lp"
""")
    else:
        self.check_block(block, """\
my_block:
%"lp" = landingpad {i32, i8*}
catch i8** @"_ZTIi"
filter [1 x i8**] [i8** @"_ZTIi"]
resume {i32, i8*} %"lp"
""")
def test_assume(self):
    """assume() lowers to a call of the llvm.assume intrinsic on an i1."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    lhs, rhs = bld.function.args[:2]
    cond = bld.icmp_signed('>', lhs, rhs, name='c')
    bld.assume(cond)
    self.check_block(blk, """\
my_block:
%"c" = icmp sgt i32 %".1", %".2"
call void @"llvm.assume"(i1 %"c")
""")
def test_vector_ops(self):
    """insertelement/shufflevector/extractelement plus an elementwise add
    on vectors, spread across several chained blocks, ending with a full
    module validity check."""
    block = self.block(name='insert_block')
    builder = ir.IRBuilder(block)
    a, b = builder.function.args[:2]
    a.name = 'a'
    b.name = 'b'
    vecty = ir.VectorType(a.type, 2)
    vec = ir.Constant(vecty, ir.Undefined)
    idxty = ir.IntType(32)
    vec = builder.insert_element(vec, a, idxty(0), name='vec1')
    vec = builder.insert_element(vec, b, idxty(1), name='vec2')
    self.check_block(block, """\
insert_block:
%"vec1" = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %"a", i32 0
%"vec2" = insertelement <2 x i32> %"vec1", i32 %"b", i32 1
""")
    block = builder.append_basic_block("shuffle_block")
    builder.branch(block)
    builder.position_at_end(block)
    # Mask [1, 0] swaps the two lanes.
    mask = ir.Constant(vecty, [1, 0])
    builder.shuffle_vector(vec, vec, mask, name='shuf')
    self.check_block(block, """\
shuffle_block:
%"shuf" = shufflevector <2 x i32> %"vec2", <2 x i32> %"vec2", <2 x i32> <i32 1, i32 0>
""")  # noqa E501
    block = builder.append_basic_block("add_block")
    builder.branch(block)
    builder.position_at_end(block)
    builder.add(vec, vec, name='sum')
    self.check_block(block, """\
add_block:
%"sum" = add <2 x i32> %"vec2", %"vec2"
""")
    block = builder.append_basic_block("extract_block")
    builder.branch(block)
    builder.position_at_end(block)
    c = builder.extract_element(vec, idxty(0), name='ex1')
    d = builder.extract_element(vec, idxty(1), name='ex2')
    self.check_block(block, """\
extract_block:
%"ex1" = extractelement <2 x i32> %"vec2", i32 0
%"ex2" = extractelement <2 x i32> %"vec2", i32 1
""")
    builder.ret(builder.add(c, d))
    self.assert_valid_ir(builder.module)
def test_bitreverse(self):
    """bitreverse lowers to the llvm.bitreverse.* intrinsic."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    operand = ir.Constant(int64, 5)
    result = bld.bitreverse(operand, name='c')
    bld.ret(result)
    self.check_block(blk, """\
my_block:
%"c" = call i64 @"llvm.bitreverse.i64"(i64 5)
ret i64 %"c"
""")
def test_bitreverse_wrongtype(self):
    """bitreverse rejects non-integer operands with a TypeError."""
    bld = ir.IRBuilder(self.block(name='my_block'))
    operand = ir.Constant(flt, 5)
    with self.assertRaises(TypeError) as raises:
        bld.bitreverse(operand, name='c')
    self.assertIn(
        "expected an integer type, got float",
        str(raises.exception))
def test_fence(self):
    """fence: invalid orderings (monotonic, None) raise ValueError; valid
    orderings emit, optionally with a syncscope."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    with self.assertRaises(ValueError) as raises:
        builder.fence("monotonic", None)
    self.assertIn(
        "Invalid fence ordering \"monotonic\"!",
        str(raises.exception))
    with self.assertRaises(ValueError) as raises:
        builder.fence(None, "monotonic")
    self.assertIn(
        "Invalid fence ordering \"None\"!",
        str(raises.exception))
    builder.fence("acquire", None)
    builder.fence("release", "singlethread")
    builder.fence("acq_rel", "singlethread")
    builder.fence("seq_cst")
    builder.ret_void()
    self.check_block(block, """\
my_block:
fence acquire
fence syncscope("singlethread") release
fence syncscope("singlethread") acq_rel
fence seq_cst
ret void
""")
def test_comment(self):
    """Single-line comments are emitted verbatim; embedded newlines are
    rejected with an AssertionError."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    with self.assertRaises(AssertionError):
        bld.comment("so\nmany lines")
    bld.comment("my comment")
    bld.ret_void()
    self.check_block(blk, """\
my_block:
; my comment
ret void
""")
def test_bswap(self):
    """bswap lowers to the llvm.bswap.* intrinsic."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    operand = ir.Constant(int32, 5)
    result = bld.bswap(operand, name='c')
    bld.ret(result)
    self.check_block(blk, """\
my_block:
%"c" = call i32 @"llvm.bswap.i32"(i32 5)
ret i32 %"c"
""")
def test_ctpop(self):
    """ctpop lowers to the llvm.ctpop.* intrinsic."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    operand = ir.Constant(int16, 5)
    result = bld.ctpop(operand, name='c')
    bld.ret(result)
    self.check_block(blk, """\
my_block:
%"c" = call i16 @"llvm.ctpop.i16"(i16 5)
ret i16 %"c"
""")
def test_ctlz(self):
    """ctlz takes the value plus an i1 is-zero-poison flag."""
    blk = self.block(name='my_block')
    bld = ir.IRBuilder(blk)
    val = ir.Constant(int16, 5)
    flag = ir.Constant(int1, 1)
    result = bld.ctlz(val, flag, name='c')
    bld.ret(result)
    self.check_block(blk, """\
my_block:
%"c" = call i16 @"llvm.ctlz.i16"(i16 5, i1 1)
ret i16 %"c"
""")
def test_convert_to_fp16_f32(self):
    """convert_to_fp16 lowers a float to i16 via llvm.convert.to.fp16.f32."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(flt, 5.0)
    b = builder.convert_to_fp16(a, name='b')
    builder.ret(b)
    self.check_block(block, """\
my_block:
%"b" = call i16 @"llvm.convert.to.fp16.f32"(float 0x4014000000000000)
ret i16 %"b"
""")  # noqa E501
def test_convert_to_fp16_f32_wrongtype(self):
    """convert_to_fp16 rejects a non-float operand with a TypeError."""
    bld = ir.IRBuilder(self.block(name='my_block'))
    operand = ir.Constant(int16, 5)
    with self.assertRaises(TypeError) as raises:
        bld.convert_to_fp16(operand, name='b')
    self.assertIn(
        "expected a float type, got i16",
        str(raises.exception))
def test_convert_from_fp16_f32(self):
    """convert_from_fp16 widens an i16 to the requested float type via
    llvm.convert.from.fp16.f32."""
    block = self.block(name='my_block')
    builder = ir.IRBuilder(block)
    a = ir.Constant(int16, 5)
    b = builder.convert_from_fp16(a, name='b', to=flt)
    builder.ret(b)
    self.check_block(block, """\
my_block:
%"b" = call float @"llvm.convert.from.fp16.f32"(i16 5)
ret float %"b"
""")
def test_convert_from_fp16_f32_notype(self):
    """convert_from_fp16 requires an explicit 'to' float type."""
    bld = ir.IRBuilder(self.block(name='my_block'))
    operand = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        bld.convert_from_fp16(operand, name='b')
    self.assertIn(
        "expected a float return type",
        str(raises.exception))
def test_convert_from_fp16_f32_wrongtype(self):
    """convert_from_fp16 rejects a non-i16 operand with a TypeError."""
    bld = ir.IRBuilder(self.block(name='my_block'))
    operand = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        bld.convert_from_fp16(operand, name='b', to=flt)
    self.assertIn(
        "expected an i16 type, got float",
        str(raises.exception))
def test_convert_from_fp16_f32_wrongtype2(self):
    """convert_from_fp16 rejects a non-float 'to' type with a TypeError."""
    bld = ir.IRBuilder(self.block(name='my_block'))
    operand = ir.Constant(flt, 5.5)
    with self.assertRaises(TypeError) as raises:
        bld.convert_from_fp16(operand, name='b', to=int16)
    self.assertIn(
        "expected a float type, got i16",
        str(raises.exception))
def test_cttz(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(int64, 5)
b = ir.Constant(int1, 1)
c = builder.cttz(a, b, name='c')
builder.ret(c)
self.check_block(block, """\
my_block:
%"c" = call i64 @"llvm.cttz.i64"(i64 5, i1 1)
ret i64 %"c"
""")
def test_cttz_wrongflag(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(int64, 5)
b = ir.Constant(int32, 3)
with self.assertRaises(TypeError) as raises:
builder.cttz(a, b, name='c')
self.assertIn(
"expected an i1 type, got i32",
str(raises.exception))
def test_cttz_wrongtype(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(flt, 5)
b = ir.Constant(int1, 1)
with self.assertRaises(TypeError) as raises:
builder.cttz(a, b, name='c')
self.assertIn(
"expected an integer type, got float",
str(raises.exception))
def test_fma(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(flt, 5)
b = ir.Constant(flt, 1)
c = ir.Constant(flt, 2)
fma = builder.fma(a, b, c, name='fma')
builder.ret(fma)
self.check_block(block, """\
my_block:
%"fma" = call float @"llvm.fma.f32"(float 0x4014000000000000, float 0x3ff0000000000000, float 0x4000000000000000)
ret float %"fma"
""") # noqa E501
def test_fma_wrongtype(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(int32, 5)
b = ir.Constant(int32, 1)
c = ir.Constant(int32, 2)
with self.assertRaises(TypeError) as raises:
builder.fma(a, b, c, name='fma')
self.assertIn(
"expected an floating point type, got i32",
str(raises.exception))
def test_fma_mixedtypes(self):
block = self.block(name='my_block')
builder = ir.IRBuilder(block)
a = ir.Constant(flt, 5)
b = ir.Constant(dbl, 1)
c = ir.Constant(flt, 2)
with self.assertRaises(TypeError) as raises:
builder.fma(a, b, c, name='fma')
self.assertIn(
"expected types to be the same, got float, double, float",
str(raises.exception))
def test_arg_attributes(self):
def gen_code(attr_name):
fnty = ir.FunctionType(ir.IntType(32), [ir.IntType(32).as_pointer(),
ir.IntType(32)])
module = ir.Module()
func = ir.Function(module, fnty, name="sum")
bb_entry = func.append_basic_block()
bb_loop = func.append_basic_block()
bb_exit = func.append_basic_block()
builder = ir.IRBuilder()
builder.position_at_end(bb_entry)
builder.branch(bb_loop)
builder.position_at_end(bb_loop)
index = builder.phi(ir.IntType(32))
index.add_incoming(ir.Constant(index.type, 0), bb_entry)
accum = builder.phi(ir.IntType(32))
accum.add_incoming(ir.Constant(accum.type, 0), bb_entry)
func.args[0].add_attribute(attr_name)
ptr = builder.gep(func.args[0], [index])
value = builder.load(ptr)
added = builder.add(accum, value)
accum.add_incoming(added, bb_loop)
indexp1 = builder.add(index, ir.Constant(index.type, 1))
index.add_incoming(indexp1, bb_loop)
cond = builder.icmp_unsigned('<', indexp1, func.args[1])
builder.cbranch(cond, bb_loop, bb_exit)
builder.position_at_end(bb_exit)
builder.ret(added)
return str(module)
for attr_name in (
'byref',
'byval',
'elementtype',
'immarg',
'inalloca',
'inreg',
'nest',
'noalias',
'nocapture',
'nofree',
'nonnull',
'noundef',
'preallocated',
'returned',
'signext',
'swiftasync',
'swifterror',
'swiftself',
'zeroext',
):
# If this parses, we emitted the right byval attribute format
llvm.parse_assembly(gen_code(attr_name))
# sret doesn't fit this pattern and is tested in test_call_attributes
| TestBuildInstructions |
python | pytorch__pytorch | tools/nightly.py | {
"start": 4304,
"end": 6538
} | class ____(logging.Formatter):
redactions: dict[str, str]
def __init__(self, fmt: str | None = None, datefmt: str | None = None) -> None:
super().__init__(fmt, datefmt)
self.redactions = {}
# Remove sensitive information from URLs
def _filter(self, s: str) -> str:
s = USERNAME_PASSWORD_RE.sub(r"://<USERNAME>:<PASSWORD>@", s)
for needle, replace in self.redactions.items():
s = s.replace(needle, replace)
return s
def formatMessage(self, record: logging.LogRecord) -> str:
if record.levelno == logging.INFO or record.levelno == logging.DEBUG:
# Log INFO/DEBUG without any adornment
return record.getMessage()
else:
# I'm not sure why, but formatMessage doesn't show up
# even though it's in the typeshed for Python >3
return super().formatMessage(record)
def format(self, record: logging.LogRecord) -> str:
return self._filter(super().format(record))
def redact(self, needle: str, replace: str = "<REDACTED>") -> None:
"""Redact specific strings; e.g., authorization tokens. This won't
retroactively redact stuff you've already leaked, so make sure
you redact things as soon as possible.
"""
# Don't redact empty strings; this will lead to something
# that looks like s<REDACTED>t<REDACTED>r<REDACTED>...
if needle == "":
return
self.redactions[needle] = replace
@contextlib.contextmanager
def timer(logger: logging.Logger, prefix: str) -> Iterator[None]:
"""Timed context manager"""
start_time = time.perf_counter()
yield
logger.info("%s took %.3f [s]", prefix, time.perf_counter() - start_time)
F = TypeVar("F", bound=Callable[..., Any])
def timed(prefix: str) -> Callable[[F], F]:
"""Decorator for timing functions"""
def decorator(f: F) -> F:
@functools.wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> Any:
logger = cast(logging.Logger, LOGGER)
logger.info(prefix)
with timer(logger, prefix):
return f(*args, **kwargs)
return cast(F, wrapper)
return decorator
| Formatter |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/functional_autograd_benchmark.py | {
"start": 4476,
"end": 10669
} | class ____(NamedTuple):
name: str
getter: GetterType
tasks: list[str]
unsupported: list[str]
MODELS = [
ModelDef("resnet18", vision_models.get_resnet18, FAST_TASKS, []),
ModelDef("fcn_resnet", vision_models.get_fcn_resnet, FAST_TASKS, []),
ModelDef("detr", vision_models.get_detr, FAST_TASKS, []),
ModelDef("ppl_simple_reg", ppl_models.get_simple_regression, ALL_TASKS, []),
ModelDef("ppl_robust_reg", ppl_models.get_robust_regression, ALL_TASKS, []),
ModelDef("wav2letter", audio_text_models.get_wav2letter, FAST_TASKS, []),
ModelDef(
"deepspeech",
audio_text_models.get_deepspeech,
FAST_TASKS_NO_DOUBLE_BACK,
DOUBLE_BACKWARD_TASKS,
),
ModelDef("transformer", audio_text_models.get_transformer, FAST_TASKS, []),
ModelDef("multiheadattn", audio_text_models.get_multiheadattn, FAST_TASKS, []),
]
def get_v_for(model: Callable, inp: InputsType, task: str) -> VType:
v: VType
if task in ["vjp"]:
out = model(*inp)
v = torch.rand_like(out)
elif task in ["jvp", "hvp", "vhp"]:
if isinstance(inp, tuple):
v = tuple(torch.rand_like(i) for i in inp)
else:
v = torch.rand_like(inp)
else:
v = None
return v
def run_once(model: Callable, inp: InputsType, task: str, v: VType, **kwargs) -> None:
func = get_task_func(task)
if v is not None:
func(model, inp, v=v, strict=True)
else:
func(model, inp, strict=True)
def run_once_functorch(
model: Callable, inp: InputsType, task: str, v: VType, maybe_check_consistency=False
) -> None:
func = get_task_functorch(task)
if v is not None:
res = func(model, inp, v=v, strict=True)
else:
res = func(model, inp, strict=True)
if maybe_check_consistency:
af_func = get_task_func(task)
if v is not None:
expected = af_func(model, inp, v=v, strict=True)
else:
expected = af_func(model, inp, strict=True)
atol = 1e-2 if task == "vhp" else 5e-3
torch.testing.assert_close(
res,
expected,
rtol=1e-5,
atol=atol,
msg=f"Consistency fail for task '{task}'",
)
def run_model(
model_getter: GetterType, args: Any, task: str, run_once_fn: Callable = run_once
) -> list[float]:
if args.gpu == -1:
device = torch.device("cpu")
def noop():
pass
do_sync = noop
else:
device = torch.device(f"cuda:{args.gpu}")
do_sync = torch.cuda.synchronize
model, inp = model_getter(device)
v = get_v_for(model, inp, task)
# Warmup
# maybe_check_consistency=True checks for consistency between
# functorch vs autograd.functional and is done in run_once_functorch only
run_once_fn(model, inp, task, v, maybe_check_consistency=True)
elapsed = []
for it in range(args.num_iters):
do_sync()
start = time.time()
run_once_fn(model, inp, task, v)
do_sync()
elapsed.append(time.time() - start)
return elapsed
def main():
parser = ArgumentParser("Main script to benchmark functional API of the autograd.")
parser.add_argument(
"--output", type=str, default="", help="Text file where to write the output"
)
parser.add_argument("--num-iters", type=int, default=10)
parser.add_argument(
"--gpu",
type=int,
default=-2,
help="GPU to use, -1 for CPU and -2 for auto-detect",
)
parser.add_argument(
"--run-slow-tasks", action="store_true", help="Run even the slow tasks"
)
parser.add_argument(
"--model-filter",
type=str,
default="",
help="Only run the models in this filter",
)
parser.add_argument(
"--task-filter", type=str, default="", help="Only run the tasks in this filter"
)
parser.add_argument(
"--num-threads",
type=int,
default=10,
help="Number of concurrent threads to use when running on cpu",
)
parser.add_argument("--seed", type=int, default=0, help="The random seed to use.")
args = parser.parse_args()
results: TimingResultType = defaultdict(defaultdict)
torch.set_num_threads(args.num_threads)
torch.set_num_interop_threads(args.num_threads)
# This automatically seed cuda if it is available
torch.manual_seed(args.seed)
if args.gpu == -2:
args.gpu = 0 if torch.cuda.is_available() else -1
for name, model_getter, recommended_tasks, unsupported_tasks in MODELS:
if args.model_filter and name not in args.model_filter:
continue
tasks = ALL_TASKS if args.run_slow_tasks else recommended_tasks
for task in tasks:
if task in unsupported_tasks:
continue
if args.task_filter and task not in args.task_filter:
continue
runtimes = run_model(model_getter, args, task)
runtimes = torch.tensor(runtimes)
mean, var = runtimes.mean(), runtimes.var()
results[name][task] = (mean.item(), var.item())
print(f"Results for model {name} on task {task}: {mean}s (var: {var})")
if has_functorch:
try:
runtimes = run_model(
model_getter, args, task, run_once_fn=run_once_functorch
)
except RuntimeError as e:
print(
f"Failed model using Functorch: {name}, task: {task}, Error message: \n\t",
e,
)
continue
runtimes = torch.tensor(runtimes)
mean, var = runtimes.mean(), runtimes.var()
results[name][f"functorch {task}"] = (mean.item(), var.item())
print(
f"Results for model {name} on task {task} using Functorch: {mean}s (var: {var})"
)
if args.output:
with open(args.output, "w") as f:
f.write(to_markdown_table(results))
if __name__ == "__main__":
main()
| ModelDef |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-motherduck/destination_motherduck/processors/motherduck.py | {
"start": 2726,
"end": 3219
} | class ____(DuckDBSqlProcessor):
"""A cache implementation for MotherDuck."""
supports_merge_insert = False
@overrides
def _setup(self) -> None:
"""Do any necessary setup, if applicable.
Note: The DuckDB parent class requires pre-creation of local directory structure. We
don't need to do that here so we override the method be a no-op.
"""
# No setup to do and no need to pre-create local file storage.
pass
| MotherDuckSqlProcessor |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/mwaa/auth.py | {
"start": 1091,
"end": 3374
} | class ____(AirflowAuthBackend):
"""A :py:class:`dagster_airlift.core.AirflowAuthBackend` that authenticates to AWS MWAA.
Under the hood, this class uses the MWAA boto3 session to request a web login token and then
uses the token to authenticate to the MWAA web server.
Args:
mwaa_session (boto3.Session): The boto3 MWAA session
env_name (str): The name of the MWAA environment
Examples:
Creating an AirflowInstance pointed at a MWAA environment.
.. code-block:: python
import boto3
from dagster_airlift.mwaa import MwaaSessionAuthBackend
from dagster_airlift.core import AirflowInstance
boto_client = boto3.client("mwaa")
af_instance = AirflowInstance(
name="my-mwaa-instance",
auth_backend=MwaaSessionAuthBackend(
mwaa_client=boto_client,
env_name="my-mwaa-env"
)
)
"""
def __init__(self, mwaa_client: Any, env_name: str) -> None:
self.mwaa_client = mwaa_client
self.env_name = env_name
# Session info is generated when we either try to retrieve a session or retrieve the web server url
self._session_info: Optional[tuple[str, str]] = None
@staticmethod
def from_profile(region: str, env_name: str, profile_name: Optional[str] = None):
boto_session = boto3.Session(profile_name=profile_name, region_name=region)
mwaa = boto_session.client("mwaa")
return MwaaSessionAuthBackend(mwaa_client=mwaa, env_name=env_name)
def get_session(self) -> requests.Session:
# Get the session info
if not self._session_info:
self._session_info = get_session_info(mwaa=self.mwaa_client, env_name=self.env_name)
session_cookie = self._session_info[1]
# Create a new session
session = requests.Session()
session.cookies.set("session", session_cookie)
# Return the session
return session
def get_webserver_url(self) -> str:
if not self._session_info:
self._session_info = get_session_info(mwaa=self.mwaa_client, env_name=self.env_name)
return f"https://{self._session_info[0]}"
| MwaaSessionAuthBackend |
python | huggingface__transformers | tests/models/bridgetower/test_modeling_bridgetower.py | {
"start": 10818,
"end": 17148
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
BridgeTowerModel,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerForContrastiveLearning,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": BridgeTowerModel} if is_torch_available() else {}
is_training = False
test_resize_embeddings = False
has_attentions = False
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_cpu_offload(self):
pass
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_disk_offload(self):
pass
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_model_parallelism(self):
pass
# function to extract meaningful tensor from output per different model_class
def extract_output(self, outputs, model_class):
return outputs["pooler_output"] if model_class == "BridgeTowerModel" else outputs["logits"]
def setUp(self):
self.model_tester = BridgeTowerModelTester(self)
self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_and_text_retrieval(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_and_text_retrieval(*config_and_inputs)
def test_for_masked_language_modeling(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_language_modeling(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "BridgeTower/bridgetower-base"
model = BridgeTowerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# Override this as `hidden states output` is different for BridgeTower
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states_text, hidden_states_vision, hidden_states_cross = (
outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
)
expected_num_layers = self.model_tester.expected_num_hidden_layers
self.assertEqual(
sum((len(hidden_states_text), len(hidden_states_vision), len(hidden_states_cross))),
expected_num_layers,
)
seq_length = self.model_tester.text_model_tester.seq_length
num_image_features = self.model_tester.vision_model_tester.num_image_features
self.assertListEqual(
list(hidden_states_text[0].shape[-2:]),
[seq_length, self.model_tester.text_model_tester.hidden_size],
)
self.assertListEqual(
list(hidden_states_vision[0].shape),
[num_image_features, 1, self.model_tester.vision_model_tester.hidden_size],
)
self.assertListEqual(
list(hidden_states_cross[0][0].shape[-2:]),
[seq_length, self.model_tester.text_model_tester.hidden_size],
)
self.assertListEqual(
list(hidden_states_cross[0][1].shape[-2:]),
[num_image_features, self.model_tester.vision_model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# Override as `hidden states output` is different for BridgeTower
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = self.has_attentions
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0][0]
hidden_states.retain_grad()
if self.has_attentions:
attentions = outputs.attentions[0][0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
@unittest.skip(reason="""Bridge Tower does not have input/output embeddings. So this test is not applicable.""")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="""Bridge Tower does not have input/output embeddings. Thus this test is not applicable.""")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Bridge Tower does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| BridgeTowerModelTest |
python | getsentry__sentry | src/sentry/issues/endpoints/browser_reporting_collector.py | {
"start": 1830,
"end": 3005
} | class ____(serializers.Serializer[Any]):
"""Serializer for validating browser report data structure."""
body = serializers.DictField()
type = serializers.ChoiceField(choices=BROWSER_REPORT_TYPES)
url = LongURLField()
user_agent = serializers.CharField()
destination = serializers.CharField(required=False)
attempts = serializers.IntegerField(required=False, min_value=1)
# Fields that do not overlap between specs
# We need to support both specs
age = serializers.IntegerField(required=False)
timestamp = serializers.IntegerField(required=False, min_value=0)
def validate_timestamp(self, value: int) -> int:
"""Validate that age is absent, but timestamp is present."""
if self.initial_data.get("age"):
raise serializers.ValidationError("If timestamp is present, age must be absent")
return value
def validate_age(self, value: int) -> int:
"""Validate that age is present, but not timestamp."""
if self.initial_data.get("timestamp"):
raise serializers.ValidationError("If age is present, timestamp must be absent")
return value
| BrowserReportSerializer |
python | django__django | django/tasks/base.py | {
"start": 1367,
"end": 4721
} | class ____:
priority: int
func: Callable # The Task function.
backend: str
queue_name: str
run_after: Optional[datetime] # The earliest this Task will run.
# Whether the Task receives the Task context when executed.
takes_context: bool = False
def __post_init__(self):
self.get_backend().validate_task(self)
@property
def name(self):
return self.func.__name__
def using(
self,
*,
priority=None,
queue_name=None,
run_after=None,
backend=None,
):
"""Create a new Task with modified defaults."""
changes = {}
if priority is not None:
changes["priority"] = priority
if queue_name is not None:
changes["queue_name"] = queue_name
if run_after is not None:
changes["run_after"] = run_after
if backend is not None:
changes["backend"] = backend
return replace(self, **changes)
def enqueue(self, *args, **kwargs):
"""Queue up the Task to be executed."""
return self.get_backend().enqueue(self, args, kwargs)
async def aenqueue(self, *args, **kwargs):
"""Queue up the Task to be executed."""
return await self.get_backend().aenqueue(self, args, kwargs)
def get_result(self, result_id):
"""
Retrieve a task result by id.
Raise TaskResultDoesNotExist if such result does not exist, or raise
TaskResultMismatch if the result exists but belongs to another Task.
"""
result = self.get_backend().get_result(result_id)
if result.task.func != self.func:
raise TaskResultMismatch(
f"Task does not match (received {result.task.module_path!r})"
)
return result
async def aget_result(self, result_id):
"""See get_result()."""
result = await self.get_backend().aget_result(result_id)
if result.task.func != self.func:
raise TaskResultMismatch(
f"Task does not match (received {result.task.module_path!r})"
)
return result
def call(self, *args, **kwargs):
if iscoroutinefunction(self.func):
return async_to_sync(self.func)(*args, **kwargs)
return self.func(*args, **kwargs)
async def acall(self, *args, **kwargs):
if iscoroutinefunction(self.func):
return await self.func(*args, **kwargs)
return await sync_to_async(self.func)(*args, **kwargs)
def get_backend(self):
from . import task_backends
return task_backends[self.backend]
@property
def module_path(self):
return f"{self.func.__module__}.{self.func.__qualname__}"
def task(
function=None,
*,
priority=DEFAULT_TASK_PRIORITY,
queue_name=DEFAULT_TASK_QUEUE_NAME,
backend=DEFAULT_TASK_BACKEND_ALIAS,
takes_context=False,
):
from . import task_backends
def wrapper(f):
return task_backends[backend].task_class(
priority=priority,
func=f,
queue_name=queue_name,
backend=backend,
takes_context=takes_context,
run_after=None,
)
if function:
return wrapper(function)
return wrapper
@dataclass(frozen=True, slots=True, kw_only=True)
| Task |
python | Textualize__textual | docs/examples/guide/widgets/fizzbuzz02.py | {
"start": 144,
"end": 658
} | class ____(Static):
def on_mount(self) -> None:
table = Table("Number", "Fizz?", "Buzz?", expand=True)
for n in range(1, 16):
fizz = not n % 3
buzz = not n % 5
table.add_row(
str(n),
"fizz" if fizz else "",
"buzz" if buzz else "",
)
self.update(table)
def get_content_width(self, container: Size, viewport: Size) -> int:
"""Force content width size."""
return 50
| FizzBuzz |
python | google__jax | jax/example_libraries/optimizers.py | {
"start": 18725,
"end": 20426
} | class ____:
"""Marks the boundary between two joined (nested) pytrees."""
def __init__(self, subtree):
self.subtree = subtree
# Since pytrees are containers of numpy arrays, look iterable.
def __iter__(self):
yield self.subtree
def unpack_optimizer_state(opt_state):
"""Converts an OptimizerState to a marked pytree.
Converts an OptimizerState to a marked pytree with the leaves of the outer
pytree represented as JoinPoints to avoid losing information. This function is
intended to be useful when serializing optimizer states.
Args:
opt_state: An OptimizerState
Returns:
A pytree with JoinPoint leaves that contain a second level of pytrees.
"""
states_flat, tree_def, subtree_defs = opt_state
subtrees = map(jax.tree.unflatten, subtree_defs, states_flat)
sentinels = [JoinPoint(subtree) for subtree in subtrees]
return jax.tree.unflatten(tree_def, sentinels)
def pack_optimizer_state(marked_pytree):
"""Converts a marked pytree to an OptimizerState.
The inverse of unpack_optimizer_state. Converts a marked pytree with the
leaves of the outer pytree represented as JoinPoints back into an
OptimizerState. This function is intended to be useful when deserializing
optimizer states.
Args:
marked_pytree: A pytree containing JoinPoint leaves that hold more pytrees.
Returns:
An equivalent OptimizerState to the input argument.
"""
sentinels, tree_def = jax.tree.flatten(marked_pytree)
assert all(isinstance(s, JoinPoint) for s in sentinels)
subtrees = [s.subtree for s in sentinels]
states_flat, subtree_defs = unzip2(map(jax.tree.flatten, subtrees))
return OptimizerState(states_flat, tree_def, subtree_defs)
| JoinPoint |
python | ray-project__ray | python/ray/autoscaler/v2/instance_manager/config.py | {
"start": 1038,
"end": 1179
} | class ____(Enum):
UNKNOWN = 0
ALIYUN = 1
AWS = 2
AZURE = 3
GCP = 4
KUBERAY = 5
LOCAL = 6
READ_ONLY = 7
| Provider |
python | streamlit__streamlit | lib/tests/streamlit/runtime/context_test.py | {
"start": 8572,
"end": 9114
} | class ____(unittest.TestCase):
"""Test StreamlitTheme class methods."""
def test_theme_init(self):
"""Test StreamlitTheme initialization."""
theme = StreamlitTheme({"type": "dark", "primary": "#FF0000"})
assert theme.type == "dark"
assert theme["primary"] == "#FF0000"
def test_theme_from_context_info(self):
"""Test StreamlitTheme.from_context_info class method."""
theme = StreamlitTheme.from_context_info({"type": "light"})
assert theme.type == "light"
| StreamlitThemeTest |
python | keras-team__keras | keras/src/layers/core/wrapper.py | {
"start": 174,
"end": 1509
} | class ____(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers.
Args:
layer: The layer to be wrapped.
"""
def __init__(self, layer, **kwargs):
try:
assert isinstance(layer, Layer)
except Exception:
raise ValueError(
f"Layer {layer} supplied to Wrapper isn't "
"a supported layer type. Please "
"ensure wrapped layer is a valid Keras layer."
)
super().__init__(**kwargs)
self.layer = layer
def build(self, input_shape=None):
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
def get_config(self):
config = {"layer": serialization_lib.serialize_keras_object(self.layer)}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
layer = serialization_lib.deserialize_keras_object(
config.pop("layer"),
custom_objects=custom_objects,
)
return cls(layer, **config)
| Wrapper |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 28392,
"end": 31207
} | class ____(FlowRunUniversalTransform):
"""
Releases deployment concurrency slots held by a flow run.
This rule releases a concurrency slot for a deployment when a flow run
transitions out of the Running or Cancelling state.
"""
async def after_transition(
self,
context: OrchestrationContext[orm_models.FlowRun, core.FlowRunPolicy],
) -> None:
if self.nullified_transition():
return
initial_state_type = (
context.initial_state.type if context.initial_state else None
)
proposed_state_type = (
context.proposed_state.type if context.proposed_state else None
)
# Check if the transition is valid for releasing concurrency slots.
# This should happen within `after_transition` because BaseUniversalTransforms
# don't know how to "fizzle" themselves if they encounter a transition that
# shouldn't apply to them, even if they use FROM_STATES and TO_STATES.
if not (
initial_state_type
in {
states.StateType.RUNNING,
states.StateType.CANCELLING,
states.StateType.PENDING,
}
and proposed_state_type
not in {
states.StateType.PENDING,
states.StateType.RUNNING,
states.StateType.CANCELLING,
}
):
return
if not context.session or not context.run.deployment_id:
return
lease_storage = get_concurrency_lease_storage()
if (
context.initial_state
and context.initial_state.state_details.deployment_concurrency_lease_id
and (
lease := await lease_storage.read_lease(
lease_id=context.initial_state.state_details.deployment_concurrency_lease_id,
)
)
and lease.metadata
):
await concurrency_limits_v2.bulk_decrement_active_slots(
session=context.session,
concurrency_limit_ids=lease.resource_ids,
slots=lease.metadata.slots,
)
await lease_storage.revoke_lease(
lease_id=lease.id,
)
else:
deployment = await deployments.read_deployment(
session=context.session,
deployment_id=context.run.deployment_id,
)
if not deployment or not deployment.concurrency_limit_id:
return
await concurrency_limits_v2.bulk_decrement_active_slots(
session=context.session,
concurrency_limit_ids=[deployment.concurrency_limit_id],
slots=1,
)
| ReleaseFlowConcurrencySlots |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 4774,
"end": 4918
} | class ____(Enum):
"Alignment for `HSplit`."
TOP = "TOP"
CENTER = "CENTER"
BOTTOM = "BOTTOM"
JUSTIFY = "JUSTIFY"
| VerticalAlign |
python | pytorch__pytorch | torch/nn/modules/rnn.py | {
"start": 17559,
"end": 31422
} | class ____(RNNBase):
r"""__init__(input_size,hidden_size,num_layers=1,nonlinearity='tanh',bias=True,batch_first=False,dropout=0.0,bidirectional=False,device=None,dtype=None)
Apply a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}`
non-linearity to an input sequence. For each element in the input sequence,
each layer computes the following function:
.. math::
h_t = \tanh(x_t W_{ih}^T + b_{ih} + h_{t-1}W_{hh}^T + b_{hh})
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
previous layer at time `t-1` or the initial hidden state at time `0`.
If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.
.. code-block:: python
# Efficient implementation equivalent to the following with bidirectional=False
rnn = nn.RNN(input_size, hidden_size, num_layers)
params = dict(rnn.named_parameters())
def forward(x, hx=None, batch_first=False):
if batch_first:
x = x.transpose(0, 1)
seq_len, batch_size, _ = x.size()
if hx is None:
hx = torch.zeros(rnn.num_layers, batch_size, rnn.hidden_size)
h_t_minus_1 = hx.clone()
h_t = hx.clone()
output = []
for t in range(seq_len):
for layer in range(rnn.num_layers):
input_t = x[t] if layer == 0 else h_t[layer - 1]
h_t[layer] = torch.tanh(
input_t @ params[f"weight_ih_l{layer}"].T
+ h_t_minus_1[layer] @ params[f"weight_hh_l{layer}"].T
+ params[f"bias_hh_l{layer}"]
+ params[f"bias_ih_l{layer}"]
)
output.append(h_t[-1].clone())
h_t_minus_1 = h_t.clone()
output = torch.stack(output)
if batch_first:
output = output.transpose(0, 1)
return output, h_t
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two RNNs together to form a `stacked RNN`,
with the second RNN taking in outputs of the first RNN and
computing the final results. Default: 1
nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
RNN layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``
Inputs: input, hx
* **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
:math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence. The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
* **hx**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
state for the input sequence batch. Defaults to zeros if not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{out} ={} & \text{hidden\_size}
\end{aligned}
Outputs: output, h_n
* **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
:math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the RNN, for each `t`. If a
:class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
will also be a packed sequence.
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
for each element in the batch.
Attributes:
weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
`(hidden_size, num_directions * hidden_size)`
weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
of shape `(hidden_size, hidden_size)`
bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
of shape `(hidden_size)`
bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
of shape `(hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional RNNs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
.. note::
``batch_first`` argument is ignored for unbatched inputs.
.. include:: ../cudnn_rnn_determinism.rst
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.RNN(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
@overload
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
nonlinearity: str = "tanh",
bias: bool = True,
batch_first: bool = False,
dropout: float = 0.0,
bidirectional: bool = False,
device=None,
dtype=None,
) -> None: ...
@overload
def __init__(self, *args, **kwargs) -> None: ...
def __init__(self, *args, **kwargs):
if "proj_size" in kwargs:
raise ValueError(
"proj_size argument is only supported for LSTM, not RNN or GRU"
)
if len(args) > 3:
self.nonlinearity = args[3]
args = args[:3] + args[4:]
else:
self.nonlinearity = kwargs.pop("nonlinearity", "tanh")
if self.nonlinearity == "tanh":
mode = "RNN_TANH"
elif self.nonlinearity == "relu":
mode = "RNN_RELU"
else:
raise ValueError(
f"Unknown nonlinearity '{self.nonlinearity}'. Select from 'tanh' or 'relu'."
)
super().__init__(mode, *args, **kwargs)
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(
self,
input: Tensor,
hx: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor]:
pass
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(
self,
input: PackedSequence,
hx: Optional[Tensor] = None,
) -> tuple[PackedSequence, Tensor]:
pass
def forward(self, input, hx=None): # noqa: F811
"""
Runs the forward pass.
"""
self._update_flat_weights()
num_directions = 2 if self.bidirectional else 1
orig_input = input
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
# script() is unhappy when max_batch_size is different type in cond branches, so we duplicate
if hx is None:
hx = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
else:
batch_sizes = None
if input.dim() not in (2, 3):
raise ValueError(
f"RNN: Expected input to be 2D or 3D, got {input.dim()}D tensor instead"
)
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
if hx is not None:
if hx.dim() != 2:
raise RuntimeError(
f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
)
hx = hx.unsqueeze(1)
else:
if hx is not None and hx.dim() != 3:
raise RuntimeError(
f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
)
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
hx = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
assert hx is not None
self.check_forward_args(input, hx, batch_sizes)
assert self.mode == "RNN_TANH" or self.mode == "RNN_RELU"
if batch_sizes is None:
if self.mode == "RNN_TANH":
result = _VF.rnn_tanh(
input,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first,
)
else:
result = _VF.rnn_relu(
input,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first,
)
else:
if self.mode == "RNN_TANH":
result = _VF.rnn_tanh(
input,
batch_sizes,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
)
else:
result = _VF.rnn_relu(
input,
batch_sizes,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
)
output = result[0]
hidden = result[1]
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(
output,
batch_sizes,
sorted_indices,
unsorted_indices,
)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
if not is_batched: # type: ignore[possibly-undefined]
output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
hidden = hidden.squeeze(1)
return output, self.permute_hidden(hidden, unsorted_indices)
# XXX: LSTM and GRU implementation is different from RNNBase, this is because:
# 1. we want to support nn.LSTM and nn.GRU in TorchScript and TorchScript in
# its current state could not support the python Union Type or Any Type
# 2. TorchScript static typing does not allow a Function or Callable type in
# Dict values, so we have to separately call _VF instead of using _rnn_impls
# 3. This is temporary only and in the transition state that we want to make it
# on time for the release
#
# More discussion details in https://github.com/pytorch/pytorch/pull/23266
#
# TODO: remove the overriding implementations for LSTM and GRU when TorchScript
# support expressing these two modules generally.
| RNN |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_highlight.py | {
"start": 14857,
"end": 16588
} | class ____(util.MdCase):
"""Test global line number cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'linenums': True
}
}
def test_global_line_numbers(self):
"""Test that global line numbers works."""
self.check_markdown(
r'''
```python
import test
test.test()
```
''',
r'''
<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span><span class="normal">1</span>
<span class="normal">2</span></pre></div></td><td class="code"><div><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
<span class="n">test</span><span class="o">.</span><span class="n">test</span><span class="p">()</span>
</code></pre></div></td></tr></table></div>
''', # noqa: E501
True
)
def test_global_disabling_of_line_numbers(self):
"""Test that global line numbers can be disabled."""
self.check_markdown(
r'''
```{.python linenums="0"}
import test
test.test()
```
''',
r'''
<div class="highlight"><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
<span class="n">test</span><span class="o">.</span><span class="n">test</span><span class="p">()</span>
</code></pre></div>
''', # noqa: E501
True
)
| TestGlobalLinenums |
python | openai__openai-python | tests/test_legacy_response.py | {
"start": 350,
"end": 1938
} | class ____(pydantic.BaseModel): ...
def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
with pytest.raises(
TypeError,
match="Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`",
):
response.parse(to=PydanticModel)
@pytest.mark.parametrize(
"content, expected",
[
("false", False),
("true", True),
("False", False),
("True", True),
("TrUe", True),
("FalSe", False),
],
)
def test_response_parse_bool(client: OpenAI, content: str, expected: bool) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=content),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
result = response.parse(to=bool)
assert result is expected
def test_response_parse_custom_stream(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=b"foo"),
client=client,
stream=True,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
stream = response.parse(to=Stream[int])
assert stream._cast_to == int
| PydanticModel |
python | psf__black | tests/data/cases/fmtonoff5.py | {
"start": 1548,
"end": 2810
} | class ____(t.Protocol):
def this_will_be_formatted ( self, **kwargs ) -> Named: ...
# fmt: on
# Regression test for https://github.com/psf/black/issues/3436.
if x:
return x
# fmt: off
elif unformatted:
# fmt: on
will_be_formatted ()
# output
# Regression test for https://github.com/psf/black/issues/3129.
setup(
entry_points={
# fmt: off
"console_scripts": [
"foo-bar"
"=foo.bar.:main",
# fmt: on
] # Includes an formatted indentation.
},
)
# Regression test for https://github.com/psf/black/issues/2015.
run(
# fmt: off
[
"ls",
"-la",
]
# fmt: on
+ path,
check=True,
)
# Regression test for https://github.com/psf/black/issues/3026.
def test_func():
# yapf: disable
if unformatted( args ):
return True
# yapf: enable
elif b:
return True
return False
# Regression test for https://github.com/psf/black/issues/2567.
if True:
# fmt: off
for _ in range( 1 ):
# fmt: on
print ( "This won't be formatted" )
print ( "This won't be formatted either" )
else:
print("This will be formatted")
# Regression test for https://github.com/psf/black/issues/3184.
| Factory |
python | keon__algorithms | tests/test_strings.py | {
"start": 8582,
"end": 9005
} | class ____(unittest.TestCase):
"""[summary]
Test for the file multiply_strings.py
Arguments:
unittest {[type]} -- [description]
"""
def test_multiply(self):
self.assertEqual("23", multiply("1", "23"))
self.assertEqual("529", multiply("23", "23"))
self.assertEqual("0", multiply("0", "23"))
self.assertEqual("1000000", multiply("100", "10000"))
| TestMultiplyStrings |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_instigation_logger.py | {
"start": 536,
"end": 719
} | class ____(NoOpComputeLogManager):
@contextmanager
def open_log_stream(self, log_key, io_type):
raise Exception("OOPS")
yield None
| CrashyStartupComputeLogManager |
python | mlflow__mlflow | mlflow/genai/judges/tools/get_span_performance_and_timing_report.py | {
"start": 944,
"end": 1154
} | class ____:
"""Information about concurrent span execution."""
span1_num: str
span2_num: str
span1_name: str
span2_name: str
overlap_s: float
@experimental(version="3.5.0")
| ConcurrentPair |
python | walkccc__LeetCode | solutions/2557. Maximum Number of Integers to Choose From a Range II/2557.py | {
"start": 0,
"end": 478
} | class ____:
def maxCount(self, banned: list[int], n: int, maxSum: int) -> int:
bannedSet = set(banned)
l = 1
r = n
while l < r:
m = (l + r + 1) // 2
if self._getSum(bannedSet, m) > maxSum:
r = m - 1
else:
l = m
return l - sum(b <= l for b in banned)
# Returns sum([1..m]) - sum(bannedSet).
def _getSum(self, bannedSet: set[int], m: int) -> int:
return m * (m + 1) // 2 - sum(b for b in bannedSet if b <= m)
| Solution |
python | huggingface__transformers | tests/models/llava_onevision/test_modeling_llava_onevision.py | {
"start": 1453,
"end": 6039
} | class ____:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=1,
video_token_index=2,
projector_hidden_act="gelu",
seq_length=7,
vision_feature_select_strategy="full",
vision_feature_layer=-1,
text_config={
"model_type": "qwen2",
"seq_length": 7,
"is_training": True,
"use_input_mask": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 580,
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 0,
},
is_training=True,
vision_config={
"image_size": 16,
"patch_size": 8,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.text_config = text_config
self.vision_config = vision_config
self.pad_token_id = text_config["pad_token_id"]
self.num_image_tokens = 10
self.seq_length = seq_length + self.num_image_tokens
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.num_channels = 3
self.image_size = 30
self.image_grid_pinpoints = [[16, 16]]
def get_config(self):
return LlavaOnevisionConfig(
text_config=self.text_config,
vision_config=self.vision_config,
ignore_index=self.ignore_index,
image_token_index=self.image_token_index,
video_token_index=self.video_token_index,
projector_hidden_act=self.projector_hidden_act,
vision_feature_select_strategy=self.vision_feature_select_strategy,
vision_feature_layer=self.vision_feature_layer,
image_grid_pinpoints=self.image_grid_pinpoints,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
3,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
input_ids[input_ids == config.image_token_index] = self.pad_token_id
input_ids[:, : self.num_image_tokens] = config.image_token_index
labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device)
labels[:, : self.num_image_tokens] == self.ignore_index
inputs_dict = {
"pixel_values": pixel_values,
"image_sizes": torch.tensor([[45, 45]] * self.batch_size),
"input_ids": input_ids,
"attention_mask": attention_mask,
"labels": labels,
}
return config, inputs_dict
@require_torch
| LlavaOnevisionVisionText2TextModelTester |
python | weaviate__weaviate-python-client | weaviate/collections/batch/base.py | {
"start": 4295,
"end": 5327
} | class ____(Generic[Obj], BatchRequest[Obj, BatchObjectReturn]):
"""Collect objects for one batch request to weaviate."""
def pop_items(self, pop_amount: int) -> List[Obj]:
"""Pop the given number of items from the BatchRequest queue.
Returns:
A list of items from the BatchRequest.
"""
self._lock.acquire()
if pop_amount >= len(self._items):
ret = copy(self._items)
self._items.clear()
else:
ret = copy(self._items[:pop_amount])
self._items = self._items[pop_amount:]
self._lock.release()
return ret
def head(self) -> Optional[Obj]:
"""Get the first item from the BatchRequest queue without removing it.
Returns:
The first item from the BatchRequest or None if the queue is empty.
"""
self._lock.acquire()
item = self._items[0] if len(self._items) > 0 else None
self._lock.release()
return item
@dataclass
| ObjectsBatchRequest |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/mitm_addons.py | {
"start": 178,
"end": 1242
} | class ____:
"""This addon sorts query parameters in the request URL.
It is useful for testing purposes, as it makes it easier to compare requests and get cache hits.
"""
def request(self, flow: http.HTTPFlow) -> None:
if url := flow.request.url:
parsed_url = urlparse(url)
# Get query parameters as dictionary
query_params = parse_qs(parsed_url.query)
# Sort query parameters alphabetically
sorted_params = {key: query_params[key] for key in sorted(query_params.keys())}
# Reconstruct the URL with sorted query parameters
sorted_url = parsed_url._replace(query=urlencode(sorted_params, doseq=True)).geturl()
# Update the request URL
flow.request.url = sorted_url
# Disabling the addon.
# It can alter the request URL when some connector URL are already encoded.
# See discussion here https://github.com/airbytehq/airbyte-internal-issues/issues/9302#issuecomment-2311854334
# addons = [SortQueryParams()]
addons = []
| SortQueryParams |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 68476,
"end": 70228
} | class ____(FromClauseAlias):
"""Represent a TABLESAMPLE clause.
This object is constructed from the :func:`_expression.tablesample` module
level function as well as the :meth:`_expression.FromClause.tablesample`
method
available on all :class:`_expression.FromClause` subclasses.
.. seealso::
:func:`_expression.tablesample`
"""
__visit_name__ = "tablesample"
_traverse_internals: _TraverseInternalsType = (
AliasedReturnsRows._traverse_internals
+ [
("sampling", InternalTraversal.dp_clauseelement),
("seed", InternalTraversal.dp_clauseelement),
]
)
@classmethod
def _factory(
cls,
selectable: _FromClauseArgument,
sampling: Union[float, Function[Any]],
name: Optional[str] = None,
seed: Optional[roles.ExpressionElementRole[Any]] = None,
) -> TableSample:
return coercions.expect(roles.FromClauseRole, selectable).tablesample(
sampling, name=name, seed=seed
)
@util.preload_module("sqlalchemy.sql.functions")
def _init( # type: ignore[override]
self,
selectable: Any,
*,
name: Optional[str] = None,
sampling: Union[float, Function[Any]],
seed: Optional[roles.ExpressionElementRole[Any]] = None,
) -> None:
assert sampling is not None
functions = util.preloaded.sql_functions
if not isinstance(sampling, functions.Function):
sampling = functions.func.system(sampling)
self.sampling: Function[Any] = sampling
self.seed = seed
super()._init(selectable, name=name)
def _get_method(self) -> Function[Any]:
return self.sampling
| TableSample |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_graphs.py | {
"start": 4180,
"end": 6831
} | class ____(object):
"""Graph input tracer using depth-first search."""
def __init__(self,
input_lists,
skip_node_names=None,
destination_node_name=None):
"""Constructor of _DFSGraphTracer.
Args:
input_lists: A list of dicts. Each dict is an adjacency (input) map from
the recipient node name as the key and the list of input node names
as the value.
skip_node_names: Optional: a list of node names to skip tracing.
destination_node_name: Optional: destination node name. If not `None`, it
should be the name of a destination not as a str and the graph tracing
will raise GraphTracingReachedDestination as soon as the node has been
reached.
Raises:
GraphTracingReachedDestination: if stop_at_node_name is not None and
the specified node is reached.
"""
self._input_lists = input_lists
self._skip_node_names = skip_node_names
self._inputs = []
self._visited_nodes = []
self._depth_count = 0
self._depth_list = []
self._destination_node_name = destination_node_name
def trace(self, graph_element_name):
"""Trace inputs.
Args:
graph_element_name: Name of the node or an output tensor of the node, as a
str.
Raises:
GraphTracingReachedDestination: if destination_node_name of this tracer
object is not None and the specified node is reached.
"""
self._depth_count += 1
node_name = get_node_name(graph_element_name)
if node_name == self._destination_node_name:
raise GraphTracingReachedDestination()
if node_name in self._skip_node_names:
return
if node_name in self._visited_nodes:
return
self._visited_nodes.append(node_name)
for input_list in self._input_lists:
if node_name not in input_list:
continue
for inp in input_list[node_name]:
if get_node_name(inp) in self._visited_nodes:
continue
self._inputs.append(inp)
self._depth_list.append(self._depth_count)
self.trace(inp)
self._depth_count -= 1
def inputs(self):
return self._inputs
def depth_list(self):
return self._depth_list
def _infer_device_name(graph_def):
"""Infer device name from a partition GraphDef."""
device_name = None
for node in graph_def.node:
if node.device:
device_name = node.device
break
if device_name is None:
logging.warn(
"Failed to infer device name from partition GraphDef: none of the "
"nodes of the GraphDef has a non-empty device name.")
return device_name
| DFSGraphTracer |
python | optuna__optuna | tests/storages_tests/rdb_tests/test_models.py | {
"start": 10716,
"end": 12724
} | class ____:
@staticmethod
def _create_model(session: Session) -> TrialModel:
direction = StudyDirectionModel(direction=StudyDirection.MINIMIZE, objective=0)
study = StudyModel(study_id=1, study_name="test-study", directions=[direction])
trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE)
session.add(study)
session.add(trial)
session.add(
TrialValueModel(
trial_id=trial.trial_id,
objective=0,
value=10,
value_type=TrialValueModel.TrialValueType.FINITE,
)
)
session.commit()
return trial
@staticmethod
def test_find_by_trial_and_objective(session: Session) -> None:
trial = TestTrialValueModel._create_model(session)
trial_value = TrialValueModel.find_by_trial_and_objective(trial, 0, session)
assert trial_value is not None
assert 10 == trial_value.value
assert TrialValueModel.find_by_trial_and_objective(trial, 1, session) is None
@staticmethod
def test_where_trial_id(session: Session) -> None:
trial = TestTrialValueModel._create_model(session)
trial_values = TrialValueModel.where_trial_id(trial.trial_id, session)
assert 1 == len(trial_values)
assert 0 == trial_values[0].objective
assert 10 == trial_values[0].value
@staticmethod
def test_cascade_delete_on_trial(session: Session) -> None:
trial = TestTrialValueModel._create_model(session)
trial.values.append(
TrialValueModel(
trial_id=1, objective=1, value=20, value_type=TrialValueModel.TrialValueType.FINITE
)
)
session.commit()
assert 2 == len(TrialValueModel.where_trial_id(trial.trial_id, session))
session.delete(trial)
session.commit()
assert 0 == len(TrialValueModel.where_trial_id(trial.trial_id, session))
| TestTrialValueModel |
python | doocs__leetcode | solution/0800-0899/0826.Most Profit Assigning Work/Solution2.py | {
"start": 0,
"end": 386
} | class ____:
def maxProfitAssignment(
self, difficulty: List[int], profit: List[int], worker: List[int]
) -> int:
m = max(difficulty)
f = [0] * (m + 1)
for d, p in zip(difficulty, profit):
f[d] = max(f[d], p)
for i in range(1, m + 1):
f[i] = max(f[i], f[i - 1])
return sum(f[min(w, m)] for w in worker)
| Solution |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_exclude_lists_audit.py | {
"start": 15171,
"end": 16900
} | class ____:
"""Test suite to verify commands respect exclude lists and return clean results."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_check_rst_symbols_all_with_exclude_lists(self):
"""Test that check rst-symbols --all returns clean results with exclude lists."""
result = self.runner.invoke(check, ["rst-symbols", "--all"])
# With exclude lists properly applied, should have no issues
assert result.exit_code == 0, f"Command failed with output: {result.output}"
assert "✓" in result.output
assert "All RST documented symbols have @public decorators" in result.output
def test_check_public_symbols_all_with_exclude_lists(self):
"""Test that check public-symbols --all returns clean results with exclude lists."""
result = self.runner.invoke(check, ["public-symbols", "--all"])
# With exclude lists properly applied, should have no issues
assert result.exit_code == 0, f"Command failed with output: {result.output}"
assert "✓" in result.output
assert "All @public symbols are documented in RST and exported top-level" in result.output
def test_check_exports_all_with_exclude_lists(self):
"""Test that check exports --all returns clean results with exclude lists."""
result = self.runner.invoke(check, ["exports", "--all"])
# With exclude lists properly applied, should have no issues
assert result.exit_code == 0, f"Command failed with output: {result.output}"
assert "✓" in result.output
assert "All exports are properly documented and decorated" in result.output
| TestCheckCommandsWithExcludeLists |
python | doocs__leetcode | solution/0800-0899/0856.Score of Parentheses/Solution.py | {
"start": 0,
"end": 297
} | class ____:
def scoreOfParentheses(self, s: str) -> int:
ans = d = 0
for i, c in enumerate(s):
if c == '(':
d += 1
else:
d -= 1
if s[i - 1] == '(':
ans += 1 << d
return ans
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1145604,
"end": 1145888
} | class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAsstrokeOpacity schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"strokeOpacity">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ScaleInvalidDataShowAsstrokeOpacity |
python | pandas-dev__pandas | asv_bench/benchmarks/algos/isin.py | {
"start": 5851,
"end": 7431
} | class ____:
"""
A subset of the cartesian product of cases have special motivations:
"nans" x "nans"
if nan-objects are different objects,
this has the potential to trigger O(n^2) running time
"short" x "long"
running time dominated by the preprocessing
"long" x "short"
running time dominated by look-up
"long" x "long"
no dominating part
"long_floats" x "long_floats"
because of nans floats are special
no dominating part
"""
variants = ["nans", "short", "long", "long_floats"]
params = [variants, variants]
param_names = ["series_type", "vals_type"]
def setup(self, series_type, vals_type):
N_many = 10**5
if series_type == "nans":
ser_vals = np.full(10**4, np.nan)
elif series_type == "short":
ser_vals = np.arange(2)
elif series_type == "long":
ser_vals = np.arange(N_many)
elif series_type == "long_floats":
ser_vals = np.arange(N_many, dtype=np.float64)
self.series = Series(ser_vals).astype(object)
if vals_type == "nans":
values = np.full(10**4, np.nan)
elif vals_type == "short":
values = np.arange(2)
elif vals_type == "long":
values = np.arange(N_many)
elif vals_type == "long_floats":
values = np.arange(N_many, dtype=np.float64)
self.values = values.astype(object)
def time_isin(self, series_type, vals_type):
self.series.isin(self.values)
| IsInForObjects |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 261460,
"end": 261933
} | class ____(sgqlc.types.Input):
"""Ordering options for project v2 field connections"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2FieldOrderField), graphql_name="field")
"""The field to order the project v2 fields by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| ProjectV2FieldOrder |
python | numba__numba | numba/core/event.py | {
"start": 2489,
"end": 5206
} | class ____:
"""An event.
Parameters
----------
kind : str
status : EventStatus
data : any; optional
Additional data for the event.
exc_details : 3-tuple; optional
Same 3-tuple for ``__exit__``.
"""
def __init__(self, kind, status, data=None, exc_details=None):
self._kind = _guard_kind(kind)
self._status = status
self._data = data
self._exc_details = (None
if exc_details is None or exc_details[0] is None
else exc_details)
@property
def kind(self):
"""Event kind
Returns
-------
res : str
"""
return self._kind
@property
def status(self):
"""Event status
Returns
-------
res : EventStatus
"""
return self._status
@property
def data(self):
"""Event data
Returns
-------
res : object
"""
return self._data
@property
def is_start(self):
"""Is it a *START* event?
Returns
-------
res : bool
"""
return self._status == EventStatus.START
@property
def is_end(self):
"""Is it an *END* event?
Returns
-------
res : bool
"""
return self._status == EventStatus.END
@property
def is_failed(self):
"""Is the event carrying an exception?
This is used for *END* event. This method will never return ``True``
in a *START* event.
Returns
-------
res : bool
"""
return self._exc_details is None
def __str__(self):
data = (f"{type(self.data).__qualname__}"
if self.data is not None else "None")
return f"Event({self._kind}, {self._status}, data: {data})"
__repr__ = __str__
_registered = defaultdict(list)
def register(kind, listener):
"""Register a listener for a given event kind.
Parameters
----------
kind : str
listener : Listener
"""
assert isinstance(listener, Listener)
kind = _guard_kind(kind)
_registered[kind].append(listener)
def unregister(kind, listener):
"""Unregister a listener for a given event kind.
Parameters
----------
kind : str
listener : Listener
"""
assert isinstance(listener, Listener)
kind = _guard_kind(kind)
lst = _registered[kind]
lst.remove(listener)
def broadcast(event):
"""Broadcast an event to all registered listeners.
Parameters
----------
event : Event
"""
for listener in _registered[event.kind]:
listener.notify(event)
| Event |
python | huggingface__transformers | examples/modular-transformers/modular_multimodal2.py | {
"start": 2248,
"end": 2387
} | class ____(CLIPVisionModel, Multimodal2VisionPreTrainedModel):
_no_split_modules = ["Multimodal2VisionEncoderLayer"]
| Multimodal2VisionModel |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/yaxis/_tickfont.py | {
"start": 235,
"end": 9914
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.yaxis"
_path_str = "layout.scene.yaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.yaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.yaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.yaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | django__django | tests/template_tests/filter_tests/test_timesince.py | {
"start": 6101,
"end": 6496
} | class ____(SimpleTestCase):
def test_since_now(self):
self.assertEqual(timesince_filter(datetime.now() - timedelta(1)), "1\xa0day")
def test_no_args(self):
self.assertEqual(timesince_filter(None), "")
def test_explicit_date(self):
self.assertEqual(
timesince_filter(datetime(2005, 12, 29), datetime(2005, 12, 30)), "1\xa0day"
)
| FunctionTests |
python | django__django | tests/gis_tests/geoapp/models.py | {
"start": 991,
"end": 1140
} | class ____(NamedModel):
city = models.ForeignKey(City, models.CASCADE)
point = models.PointField()
poly = models.PolygonField()
| MultiFields |
python | scikit-learn__scikit-learn | sklearn/utils/metaestimators.py | {
"start": 415,
"end": 6234
} | class ____(BaseEstimator, metaclass=ABCMeta):
"""Base class for estimators that are composed of named sub-estimators.
This abstract class provides parameter management functionality for
meta-estimators that contain collections of named estimators. It handles
the complex logic for getting and setting parameters on nested estimators
using the "estimator_name__parameter" syntax.
The class is designed to work with any attribute containing a list of
(name, estimator) tuples.
"""
@abstractmethod
def __init__(self):
pass
def _get_params(self, attr, deep=True):
out = super().get_params(deep=deep)
if not deep:
return out
estimators = getattr(self, attr)
try:
out.update(estimators)
except (TypeError, ValueError):
# Ignore TypeError for cases where estimators is not a list of
# (name, estimator) and ignore ValueError when the list is not
# formatted correctly. This is to prevent errors when calling
# `set_params`. `BaseEstimator.set_params` calls `get_params` which
# can error for invalid values for `estimators`.
return out
for name, estimator in estimators:
if hasattr(estimator, "get_params"):
for key, value in estimator.get_params(deep=True).items():
out["%s__%s" % (name, key)] = value
return out
def _set_params(self, attr, **params):
# Ensure strict ordering of parameter setting:
# 1. Replace the entire estimators collection
if attr in params:
setattr(self, attr, params.pop(attr))
# 2. Replace individual estimators by name
items = getattr(self, attr)
if isinstance(items, list) and items:
# Get item names used to identify valid names in params
# `zip` raises a TypeError when `items` does not contains
# elements of length 2
with suppress(TypeError):
item_names, _ = zip(*items)
for name in list(params.keys()):
if "__" not in name and name in item_names:
self._replace_estimator(attr, name, params.pop(name))
# 3. Individual estimator parameters and other initialisation arguments
super().set_params(**params)
return self
def _replace_estimator(self, attr, name, new_val):
# assumes `name` is a valid estimator name
new_estimators = list(getattr(self, attr))
for i, (estimator_name, _) in enumerate(new_estimators):
if estimator_name == name:
new_estimators[i] = (name, new_val)
break
setattr(self, attr, new_estimators)
def _validate_names(self, names):
if len(set(names)) != len(names):
raise ValueError("Names provided are not unique: {0!r}".format(list(names)))
invalid_names = set(names).intersection(self.get_params(deep=False))
if invalid_names:
raise ValueError(
"Estimator names conflict with constructor arguments: {0!r}".format(
sorted(invalid_names)
)
)
invalid_names = [name for name in names if "__" in name]
if invalid_names:
raise ValueError(
"Estimator names must not contain __: got {0!r}".format(invalid_names)
)
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels.
Slice X, y according to indices for cross-validation, but take care of
precomputed kernel-matrices or pairwise affinities / distances.
If ``estimator._pairwise is True``, X needs to be square and
we slice rows and columns. If ``train_indices`` is not None,
we slice rows using ``indices`` (assumed the test set) and columns
using ``train_indices``, indicating the training set.
Labels y will always be indexed only along the first axis.
Parameters
----------
estimator : object
Estimator to determine whether we should slice only rows or rows and
columns.
X : array-like, sparse matrix or iterable
Data to be indexed. If ``estimator._pairwise is True``,
this needs to be a square array-like or sparse matrix.
y : array-like, sparse matrix or iterable
Targets to be indexed.
indices : array of int
Rows to select from X and y.
If ``estimator._pairwise is True`` and ``train_indices is None``
then ``indices`` will also be used to slice columns.
train_indices : array of int or None, default=None
If ``estimator._pairwise is True`` and ``train_indices is not None``,
then ``train_indices`` will be use to slice the columns of X.
Returns
-------
X_subset : array-like, sparse matrix or list
Indexed data.
y_subset : array-like, sparse matrix or list
Indexed targets.
"""
if get_tags(estimator).input_tags.pairwise:
if not hasattr(X, "shape"):
raise ValueError(
"Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices."
)
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = _safe_indexing(X, indices)
if y is not None:
y_subset = _safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
| _BaseComposition |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_custom_report.py | {
"start": 3581,
"end": 10152
} | class ____(TestBaseCustomReport):
custom_report_aggregation = "Daily"
report_file = "custom_report"
incremental_report_file = "custom_report_incremental"
records_number = 8
report_file_with_records_further_start_date = "custom_report_with_record_further_start_date"
incremental_report_file_with_records_further_cursor = "custom_report_incremental_with_records_further_cursor"
state_file = "custom_report"
state_file_legacy = "custom_report"
state_file_after_migration = "non_hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"non_hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
first_read_state = get_state_after_migration(time_period="2024-05-17", account_id=TestSuiteReportStream.account_id)
second_read_state = get_state_after_migration(time_period="2023-12-25", account_id=TestSuiteReportStream.account_id)
first_read_state_for_records_further_start_date = get_state_after_migration(
time_period="2024-05-17", account_id=TestSuiteReportStream.account_id
)
second_read_state_for_records_further_start_date = get_state_after_migration(
time_period="2024-05-07", account_id=TestSuiteReportStream.account_id
)
second_read_state_for_records_before_start_date = get_state_after_migration(
time_period="2024-01-01", account_id=TestSuiteReportStream.account_id
)
def _mock_report_apis(self):
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Daily", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Daily", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Daily", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 1, "Month": 1, "Year": 2023}, "CustomDateRangeEnd": {"Day": 6, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
self.mock_generate_report_api(
endpoint="Submit",
response_template="generate_report",
body=b'{"ReportRequest": {"ExcludeColumnHeaders": false, "ExcludeReportFooter": true, "ExcludeReportHeader": true, "Format": "Csv", "FormatVersion": "2.0", "ReportName": "AgeGenderAudienceReport", "ReturnOnlyCompleteData": false, "Type": "AgeGenderAudienceReportRequest", "Aggregation": "Daily", "Columns": ["AccountName", "AccountNumber", "AccountId", "TimePeriod", "CampaignName", "CampaignId", "AdGroupName", "AdGroupId", "AdDistribution", "AgeGroup", "Gender", "Impressions", "Clicks", "Conversions", "Spend", "Revenue", "ExtendedCost", "Assists", "Language", "AccountStatus", "CampaignStatus", "AdGroupStatus", "BaseCampaignId", "AllConversions", "AllRevenue", "ViewThroughConversions", "Goal", "GoalType", "AbsoluteTopImpressionRatePercent", "TopImpressionRatePercent", "ConversionsQualified", "AllConversionsQualified", "ViewThroughConversionsQualified", "ViewThroughRevenue"], "Scope": {"AccountIds": [180535609]}, "Time": {"CustomDateRangeStart": {"Day": 6, "Month": 5, "Year": 2024}, "CustomDateRangeEnd": {"Day": 8, "Month": 5, "Year": 2024}, "ReportTimeZone": "GreenwichMeanTimeDublinEdinburghLisbonLondon"}}}',
)
| TestCustomReportDaily |
python | miyuchina__mistletoe | test/test_span_token.py | {
"start": 5348,
"end": 5743
} | class ____(TestBranchToken):
def test_parse(self):
self._test_parse(span_token.Image, '', 'alt', src='link')
self._test_parse(span_token.Image, '', 'alt',
src='link', title='title')
def test_no_alternative_text(self):
self._test_parse(span_token.Image, '', '', children=False, src='link')
| TestImage |
python | ray-project__ray | rllib/models/tf/tf_action_dist.py | {
"start": 18523,
"end": 20550
} | class ____(TFActionDistribution):
"""
A Beta distribution is defined on the interval [0, 1] and parameterized by
shape parameters alpha and beta (also called concentration parameters).
PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
and Gamma(n) = (n - 1)!
"""
def __init__(
self,
inputs: List[TensorType],
model: ModelV2,
low: float = 0.0,
high: float = 1.0,
):
# Stabilize input parameters (possibly coming from a linear layer).
inputs = tf.clip_by_value(inputs, log(SMALL_NUMBER), -log(SMALL_NUMBER))
inputs = tf.math.log(tf.math.exp(inputs) + 1.0) + 1.0
self.low = low
self.high = high
alpha, beta = tf.split(inputs, 2, axis=-1)
# Note: concentration0==beta, concentration1=alpha (!)
self.dist = tfp.distributions.Beta(concentration1=alpha, concentration0=beta)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
mean = self.dist.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
return self._squash(self.dist.sample())
@override(ActionDistribution)
def logp(self, x: TensorType) -> TensorType:
unsquashed_values = self._unsquash(x)
return tf.math.reduce_sum(self.dist.log_prob(unsquashed_values), axis=-1)
def _squash(self, raw_values: TensorType) -> TensorType:
return raw_values * (self.high - self.low) + self.low
def _unsquash(self, values: TensorType) -> TensorType:
return (values - self.low) / (self.high - self.low)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
return np.prod(action_space.shape, dtype=np.int32) * 2
@OldAPIStack
| Beta |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_traveling_salesman.py | {
"start": 3635,
"end": 5293
} | class ____(TestBase):
def test_greedy(self):
cycle = nx_app.greedy_tsp(self.DG, source="D")
cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0)
cycle = nx_app.greedy_tsp(self.DG2, source="D")
cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 78.0)
cycle = nx_app.greedy_tsp(self.UG, source="D")
cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0)
cycle = nx_app.greedy_tsp(self.UG2, source="D")
cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0)
def test_not_complete_graph(self):
pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG)
pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG)
def test_not_weighted_graph(self):
nx_app.greedy_tsp(self.unweightedUG)
nx_app.greedy_tsp(self.unweightedDG)
def test_two_nodes(self):
G = nx.Graph()
G.add_weighted_edges_from({(1, 2, 1)})
cycle = nx_app.greedy_tsp(G)
cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle))
validate_solution(cycle, cost, [1, 2, 1], 2)
def test_ignore_selfloops(self):
G = nx.complete_graph(5)
G.add_edge(3, 3)
cycle = nx_app.greedy_tsp(G)
assert len(cycle) - 1 == len(G) == len(set(cycle))
| TestGreedyTSP |
python | openai__openai-python | src/openai/types/chat/chat_completion_content_part_input_audio_param.py | {
"start": 488,
"end": 704
} | class ____(TypedDict, total=False):
input_audio: Required[InputAudio]
type: Required[Literal["input_audio"]]
"""The type of the content part. Always `input_audio`."""
| ChatCompletionContentPartInputAudioParam |
python | scipy__scipy | scipy/linalg/tests/test_decomp.py | {
"start": 95585,
"end": 103282
} | class ____:
@classmethod
def setup_class(cls):
# https://www.nag.com/lapack-ex/node119.html
A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j,
7.5 + 0.5j],
[-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j,
-10.5 - 1.5j],
[4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j,
-7.5 - 3.5j],
[5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j,
-19.0 - 32.5j]])
B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j],
[0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j],
[1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j],
[0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]])
# https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml
A2 = np.array([[3.9, 12.5, -34.5, -0.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 21.5, -43.5, 3.5],
[4.4, 26.0, -46.0, 6.0]])
B2 = np.array([[1, 2, -3, 1],
[1, 3, -5, 4],
[1, 3, -4, 3],
[1, 3, -4, 4]])
# example with the eigenvalues
# -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j,
# 0.61244091
# thus featuring:
# * one complex conjugate eigenvalue pair,
# * one eigenvalue in the lhp
# * 2 eigenvalues in the unit circle
# * 2 non-real eigenvalues
A3 = np.array([[5., 1., 3., 3.],
[4., 4., 2., 7.],
[7., 4., 1., 3.],
[0., 4., 8., 7.]])
B3 = np.array([[8., 10., 6., 10.],
[7., 7., 2., 9.],
[9., 1., 6., 6.],
[5., 1., 4., 7.]])
# example with infinite eigenvalues
A4 = np.eye(2)
B4 = np.diag([0, 1])
# example with (alpha, beta) = (0, 0)
A5 = np.diag([1, 0])
cls.A = [A1, A2, A3, A4, A5]
cls.B = [B1, B2, B3, B4, A5]
def qz_decomp(self, sort):
with np.errstate(all='raise'):
ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)]
return tuple(ret)
def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z):
Id = np.eye(*A.shape)
# make sure Q and Z are orthogonal
assert_array_almost_equal(Q @ Q.T.conj(), Id)
assert_array_almost_equal(Z @ Z.T.conj(), Id)
# check factorization
assert_array_almost_equal(Q @ AA, A @ Z)
assert_array_almost_equal(Q @ BB, B @ Z)
# check shape of AA and BB
assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape))
assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape))
# check eigenvalues
for i in range(A.shape[0]):
# does the current diagonal element belong to a 2-by-2 block
# that was already checked?
if i > 0 and A[i, i - 1] != 0:
continue
# take care of 2-by-2 blocks
if i < AA.shape[0] - 1 and AA[i + 1, i] != 0:
evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2])
# make sure the pair of complex conjugate eigenvalues
# is ordered consistently (positive imaginary part first)
if evals[0].imag < 0:
evals = evals[[1, 0]]
tmp = alpha[i:i + 2]/beta[i:i + 2]
if tmp[0].imag < 0:
tmp = tmp[[1, 0]]
assert_array_almost_equal(evals, tmp)
else:
if alpha[i] == 0 and beta[i] == 0:
assert_equal(AA[i, i], 0)
assert_equal(BB[i, i], 0)
elif beta[i] == 0:
assert_equal(BB[i, i], 0)
else:
assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i])
sortfun = _select_function(sort)
lastsort = True
for i in range(A.shape[0]):
cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]]))
# once the sorting criterion was not matched all subsequent
# eigenvalues also shouldn't match
if not lastsort:
assert not cursort
lastsort = cursort
def check_all(self, sort):
ret = self.qz_decomp(sort)
for reti, Ai, Bi in zip(ret, self.A, self.B):
self.check(Ai, Bi, sort, *reti)
def test_lhp(self):
self.check_all('lhp')
def test_rhp(self):
self.check_all('rhp')
def test_iuc(self):
self.check_all('iuc')
def test_ouc(self):
self.check_all('ouc')
def test_ref(self):
# real eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0
return out
self.check_all(sort)
def test_cef(self):
# complex eigenvalues first (top-left corner)
def sort(x, y):
out = np.empty_like(x, dtype=bool)
nonzero = (y != 0)
out[~nonzero] = False
out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0
return out
self.check_all(sort)
def test_diff_input_types(self):
ret = ordqz(self.A[1], self.B[2], sort='lhp')
self.check(self.A[1], self.B[2], 'lhp', *ret)
ret = ordqz(self.B[2], self.A[1], sort='lhp')
self.check(self.B[2], self.A[1], 'lhp', *ret)
def test_sort_explicit(self):
# Test order of the eigenvalues in the 2 x 2 case where we can
# explicitly compute the solution
A1 = np.eye(2)
B1 = np.diag([-2, 0.5])
expected1 = [('lhp', [-0.5, 2]),
('rhp', [2, -0.5]),
('iuc', [-0.5, 2]),
('ouc', [2, -0.5])]
A2 = np.eye(2)
B2 = np.diag([-2 + 1j, 0.5 + 0.5j])
expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]),
('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]),
('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])]
# 'lhp' is ambiguous so don't test it
A3 = np.eye(2)
B3 = np.diag([2, 0])
expected3 = [('rhp', [0.5, np.inf]),
('iuc', [0.5, np.inf]),
('ouc', [np.inf, 0.5])]
# 'rhp' is ambiguous so don't test it
A4 = np.eye(2)
B4 = np.diag([-2, 0])
expected4 = [('lhp', [-0.5, np.inf]),
('iuc', [-0.5, np.inf]),
('ouc', [np.inf, -0.5])]
A5 = np.diag([0, 1])
B5 = np.diag([0, 0.5])
# 'lhp' and 'iuc' are ambiguous so don't test them
expected5 = [('rhp', [2, np.nan]),
('ouc', [2, np.nan])]
A = [A1, A2, A3, A4, A5]
B = [B1, B2, B3, B4, B5]
expected = [expected1, expected2, expected3, expected4, expected5]
for Ai, Bi, expectedi in zip(A, B, expected):
for sortstr, expected_eigvals in expectedi:
_, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr)
azero = (alpha == 0)
bzero = (beta == 0)
x = np.empty_like(alpha)
x[azero & bzero] = np.nan
x[~azero & bzero] = np.inf
x[~bzero] = alpha[~bzero]/beta[~bzero]
assert_allclose(expected_eigvals, x)
| TestOrdQZ |
python | django-debug-toolbar__django-debug-toolbar | tests/middleware.py | {
"start": 38,
"end": 456
} | class ____:
"""
This middleware exists to use the cache before and after
the toolbar is setup.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
cache.set("UseCacheAfterToolbar.before", 1)
response = self.get_response(request)
cache.set("UseCacheAfterToolbar.after", 1)
return response
| UseCacheAfterToolbar |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/numbers.py | {
"start": 20473,
"end": 21002
} | class ____(SearchStrategy[float]):
"""Strategy for sampling the space of nan float values."""
def do_draw(self, data: ConjectureData) -> float:
# Nans must have all exponent bits and the first mantissa bit set, so
# we generate by taking 64 random bits and setting the required ones.
sign_bit = int(data.draw_boolean()) << 63
nan_bits = float_to_int(math.nan)
mantissa_bits = data.draw_integer(0, 2**52 - 1)
return int_to_float(sign_bit | nan_bits | mantissa_bits)
| NanStrategy |
python | coleifer__peewee | tests/regressions.py | {
"start": 832,
"end": 903
} | class ____(TestModel):
name = TextField(column_name='pname')
| ColAlias |
python | google__jax | jax/_src/array.py | {
"start": 6126,
"end": 51632
} | class ____(basearray.Array):
aval: core.ShapedArray
_sharding: Sharding
_arrays: list[ArrayImpl]
_committed: bool
_skip_checks: bool
_npy_value: np.ndarray | None
@use_cpp_method()
def __init__(self, aval: core.ShapedArray, sharding: Sharding,
arrays: Sequence[ArrayImpl],
committed: bool, _skip_checks: bool = False):
# NOTE: the actual implementation of the constructor is moved to C++.
self.aval = aval
self._sharding = sharding
self._committed = committed
self._npy_value = None
arrays = [a._arrays[0] for a in arrays]
# Don't rearrange if skip_checks is enabled because this assumes that the
# input buffers are already arranged properly. This usually happens when
# Array's are created as output of a JAX transformation
# (like pjit, etc).
if not _skip_checks or config.enable_checks.value:
arrays = self._check_and_rearrange(arrays, self._sharding, self.aval)
self._arrays = arrays
def _check_and_rearrange(self, arrays, sharding, aval):
device_id_to_buffer = {_get_device(db).id: db for db in arrays}
addressable_dev = sharding.addressable_devices
if len(arrays) != len(addressable_dev):
raise ValueError(
f"Expected {len(addressable_dev)} per-device arrays "
"(this is how many devices are addressable by the sharding), but "
f"got {len(arrays)}")
array_device_ids = set(device_id_to_buffer.keys())
addressable_device_ids = {d.id for d in addressable_dev}
if len(array_device_ids) != len(arrays):
buffer_device_ids = [_get_device(db).id for db in arrays]
raise ValueError(
"When making an array from single-device arrays, the input arrays"
" must be from distinct devices, but got device IDs"
f" {buffer_device_ids}")
# Calculate a symmetric difference because the device ids between sharding
# and _arrays should match.
diff = array_device_ids ^ addressable_device_ids
if diff:
dev_in_sharding_not_in_arrays = addressable_device_ids - array_device_ids
dev_in_arrays_not_in_sharding = array_device_ids - addressable_device_ids
err_msg = (
"Addressable devices and per-device arrays devices do not match.")
if dev_in_sharding_not_in_arrays:
err_msg += (f" Sharding contains devices {dev_in_sharding_not_in_arrays} "
"that are not present in per-device arrays.")
if dev_in_arrays_not_in_sharding:
err_msg += (f" Per-device arrays contain devices {dev_in_arrays_not_in_sharding} "
"that are not present in the sharding.")
raise ValueError(err_msg)
_validate_shape_and_dtype_for_per_device_arrays(
arrays,
sharding=sharding,
aval=aval,
expected_shape=sharding.shard_shape(aval.shape),
)
# Rearrange arrays based on the device assignment.
addressable_da = sharding._addressable_device_assignment
return [device_id_to_buffer[device.id] for device in addressable_da]
@property
def shape(self) -> Shape:
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return math.prod(self.shape)
@property
def sharding(self):
return self._sharding
@property
def device(self):
self._check_if_deleted()
if isinstance(self.sharding, SingleDeviceSharding):
return list(self.sharding.device_set)[0]
return self.sharding
@property
def weak_type(self):
return self.aval.weak_type
@property
def committed(self) -> bool:
return self._committed
def __str__(self):
return str(self._value)
def __len__(self):
try:
return self.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
def __bool__(self):
core.check_bool_conversion(self)
return bool(self._value)
def __float__(self):
core.check_scalar_conversion(self)
return self._value.__float__()
def __int__(self):
core.check_scalar_conversion(self)
return self._value.__int__()
def __complex__(self):
core.check_scalar_conversion(self)
return self._value.__complex__()
def __hex__(self):
core.check_integer_conversion(self)
return hex(self._value)
def __oct__(self):
core.check_integer_conversion(self)
return oct(self._value)
def __index__(self):
core.check_integer_conversion(self)
return op.index(self._value)
def tobytes(self, order="C"):
return self._value.tobytes(order)
def tolist(self):
return self._value.tolist()
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
def __getitem__(self, idx):
from jax._src.lax import lax # pytype: disable=import-error
from jax._src.numpy import indexing # pytype: disable=import-error
self._check_if_deleted()
if isinstance(self.sharding, PmapSharding):
if config.pmap_no_rank_reduction.value:
cidx = idx if isinstance(idx, tuple) else (idx,)
padded_cidx = tuple(
slice(i, i + 1, None) if isinstance(i, int) else i for i in cidx
) + (slice(None),) * (len(self.shape) - len(cidx))
else:
if not isinstance(idx, tuple):
padded_cidx = (idx,) + (slice(None),) * (len(self.shape) - 1)
else:
padded_cidx = idx + (slice(None),) * (len(self.shape) - len(idx))
indices = tuple(self.sharding.devices_indices_map(self.shape).values())
try:
arr_idx = indices.index(padded_cidx)
except ValueError:
arr_idx = None
if arr_idx is not None:
out = self._arrays[arr_idx]
sharding = SingleDeviceSharding(_get_device(out))
if config.pmap_no_rank_reduction.value:
# If cidx was the index of a single shard, then it corresponds to one
# shard of the chunked dimension.
dims = tuple(i for i, x in enumerate(cidx) if isinstance(x, int))
# Squeeze on committed arrays to avoid data movement to shard 0.
out = lax.squeeze(out, dimensions=dims)
return ArrayImpl(
out.aval, sharding, [out], committed=False, _skip_checks=True)
return indexing.rewriting_take(self, idx)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
assert self.is_fully_replicated or self.is_fully_addressable
if dispatch.is_single_device_sharding(self.sharding) or self.is_fully_replicated:
return (sl for chunk in self._chunk_iter(100) for sl in chunk._unstack())
elif isinstance(self.sharding, PmapSharding):
return (self[i] for i in range(self.shape[0]))
else:
# TODO(yashkatariya): Don't bounce to host and use `_chunk_iter` path
# here after uneven partitioning support is added.
return (api.device_put(self._value[i]) for i in range(self.shape[0]))
@property
def is_fully_replicated(self) -> bool:
return self.sharding.is_fully_replicated
def __repr__(self):
prefix = 'Array('
if self.aval is not None and self.aval.weak_type:
dtype_str = f'dtype={self.dtype.name}, weak_type=True)'
else:
dtype_str = f'dtype={self.dtype.name})'
if self.is_fully_addressable or self.is_fully_replicated:
line_width = np.get_printoptions()["linewidth"]
if self.size == 0:
s = f"[], shape={self.shape}"
elif not self.sharding.has_addressable_devices:
s = f"shape={self.shape}"
else:
s = np.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return f"{prefix}{s},{sep}{dtype_str}"
else:
return f"{prefix}shape={self.shape}, {dtype_str}"
@property
def is_fully_addressable(self) -> bool:
"""Is this Array fully addressable?
A jax.Array is fully addressable if the current process can address all of
the devices named in the :class:`Sharding`. ``is_fully_addressable`` is
equivalent to "is_local" in multi-process JAX.
Note that fully replicated is not equal to fully addressable i.e.
a jax.Array which is fully replicated can span across multiple hosts and is
not fully addressable.
"""
return self.sharding.is_fully_addressable
def __array__(self, dtype=None, context=None, copy=None):
# copy argument is supported by np.asarray starting in numpy 2.0
kwds = {} if copy is None else {'copy': copy}
return np.asarray(self._value, dtype=dtype, **kwds)
def __dlpack__(self, *, stream: int | Any | None = None,
max_version: tuple[int, int] | None = None,
dl_device: tuple[DLDeviceType, int] | None = None,
copy: bool | None = None):
from jax._src.dlpack import to_dlpack # pytype: disable=import-error # pylint: disable=g-import-not-at-top
device_set = self.sharding.device_set
if len(device_set) > 1:
raise BufferError(
"to_dlpack can only pack a dlpack tensor from an array on a singular "
f"device, but an array with a Sharding over {len(device_set)} devices "
"was provided."
)
device, = device_set
return to_dlpack(self, stream=stream,
max_version=max_version,
src_device=device,
dl_device=dl_device,
copy=copy)
def __dlpack_device__(self) -> tuple[enum.Enum, int]:
if len(self._arrays) != 1:
raise BufferError("__dlpack__ only supported for unsharded arrays.")
from jax._src.dlpack import DLDeviceType # pytype: disable=import-error # pylint: disable=g-import-not-at-top
if self.platform() == "cpu":
return DLDeviceType.kDLCPU, 0
elif self.platform() == "gpu":
platform_version = _get_device(self).client.platform_version
if "cuda" in platform_version:
dl_device_type = DLDeviceType.kDLCUDA
elif "rocm" in platform_version:
dl_device_type = DLDeviceType.kDLROCM
else:
raise BufferError("Unknown GPU platform for __dlpack__: "
f"{platform_version}")
local_hardware_id = _get_device(self).local_hardware_id
if local_hardware_id is None:
raise BufferError("Couldn't get local_hardware_id for __dlpack__")
return dl_device_type, local_hardware_id
else:
raise BufferError(
"__dlpack__ device only supported for CPU and GPU, got platform: "
f"{self.platform()}"
)
def __reduce__(self):
fun, args, arr_state = self._value.__reduce__()
aval_state = {'weak_type': self.aval.weak_type}
return (_reconstruct_array, (fun, args, arr_state, aval_state))
@use_cpp_method()
def unsafe_buffer_pointer(self):
if len(self._arrays) != 1:
raise ValueError("unsafe_buffer_pointer() is supported only for unsharded"
" arrays.")
return self._arrays[0].unsafe_buffer_pointer()
@property
@use_cpp_method()
def __cuda_array_interface__(self):
if len(self._arrays) != 1:
raise ValueError("__cuda_array_interface__() is supported only for "
"unsharded arrays.")
return self._arrays[0].__cuda_array_interface__ # pytype: disable=attribute-error # bind-properties
@use_cpp_method()
def on_device_size_in_bytes(self):
"""Returns the total global on-device size of the array in bytes."""
arr = self._arrays[0]
per_shard_size = arr.on_device_size_in_bytes()
return per_shard_size * self.sharding.num_devices
def devices(self) -> set[Device]:
self._check_if_deleted()
return self.sharding.device_set
@property
def device_buffer(self):
raise AttributeError(
"arr.device_buffer has been deprecated. Use arr.addressable_data(0)")
@property
def device_buffers(self):
raise AttributeError(
"arr.device_buffers has been deprecated. Use [x.data for x in arr.addressable_shards]")
def addressable_data(self, index: int) -> ArrayImpl:
self._check_if_deleted()
if self.is_fully_replicated:
return self._fully_replicated_shard()
return self._arrays[index]
@functools.cached_property
def addressable_shards(self) -> Sequence[Shard]:
self._check_if_deleted()
out = []
for a in self._arrays:
out.append(Shard(_get_device(a), self.sharding, self.shape, a))
return out
@property
def format(self):
# TODO(yashkatariya): Remove the deleted check from here.
if self.is_deleted():
return Format(None, self.sharding)
try:
return Format(Layout.from_pjrt_layout(self._pjrt_layout),
self.sharding)
except _jax.JaxRuntimeError as e:
msg, *_ = e.args
if type(msg) is str and msg.startswith("UNIMPLEMENTED"):
return Format(None, self.sharding)
else:
raise
@property
def global_shards(self) -> Sequence[Shard]:
"""Returns list of all `Shard`s of the Array across all devices.
The result includes shards that are not addressable by the current process.
If a `Shard` is not addressable, then its `data` will be `None`.
"""
self._check_if_deleted()
if self.is_fully_addressable: # pylint: disable=using-constant-test
return self.addressable_shards
out = []
device_id_to_buffer = {_get_device(a).id: a for a in self._arrays}
for global_d in self.sharding.device_set:
if device_id_to_buffer.get(global_d.id, None) is not None:
array = device_id_to_buffer[global_d.id]
else:
array = None
out.append(Shard(global_d, self.sharding, self.shape, array))
return out
@use_cpp_method()
def delete(self):
if self._arrays is None:
return
for buf in self._arrays:
buf.delete()
self._arrays = None
self._npy_value = None
@use_cpp_method()
def is_deleted(self):
if self._arrays is None:
return True
# This path is taken when a view of `Array` is created and the original
# Array is deleted. In that case, the buffers the view represents also get
# deleted.
return any(buf.is_deleted() for buf in self._arrays)
def _check_if_deleted(self):
if self.is_deleted():
raise RuntimeError(
f"Array has been deleted with shape={self.aval.str_short()}.")
@use_cpp_method()
def block_until_ready(self):
self._check_if_deleted()
for db in self._arrays:
db.block_until_ready()
return self
@use_cpp_method()
def _single_device_array_to_np_array_did_copy(self) -> tuple[np.ndarray, bool]: # type: ignore
... # pytype: disable=bad-return-type
@use_cpp_method()
def _copy_single_device_array_to_host_async(self):
self._arrays[0].copy_to_host_async()
@profiler.annotate_function
def copy_to_host_async(self):
self._check_if_deleted()
if self._npy_value is None:
if self.is_fully_replicated and self.sharding.has_addressable_devices:
self._copy_single_device_array_to_host_async()
return
for i, _ in _cached_index_calc(self.sharding, self.shape):
self._arrays[i]._copy_single_device_array_to_host_async()
@property
@functools.partial(profiler.annotate_function, name="np.asarray(jax.Array)")
def _value(self) -> np.ndarray:
self._check_if_deleted()
if self._npy_value is None:
# addressable_device_list can be empty. If it's empty, we will error below
if self.is_fully_replicated and self.sharding.has_addressable_devices:
npy_value, did_copy = self._single_device_array_to_np_array_did_copy()
npy_value.flags.writeable = False
if did_copy:
self._npy_value = npy_value
return npy_value
# TODO(yashkatariya): Merge `_process_has_full_value_in_mcjax` with
# is_fully_addressable.
# is_fully_addressable return False if addressable_device_list is empty.
if (not self.is_fully_addressable and
not _process_has_full_value_in_mcjax(self.sharding, self.shape)):
raise RuntimeError(
"Fetching value for `jax.Array` that spans non-addressable"
" (non process local) devices is not possible. You can use"
" `jax.experimental.multihost_utils.process_allgather` to print the"
" global array or use `.addressable_shards` method of jax.Array to"
" inspect the addressable (process local) shards."
)
for i, _ in _cached_index_calc(self.sharding, self.shape):
self._arrays[i]._copy_single_device_array_to_host_async()
npy_value = np.empty(self.shape, self.dtype)
for i, ind in _cached_index_calc(self.sharding, self.shape):
npy_value[ind], _ = self._arrays[i]._single_device_array_to_np_array_did_copy()
self._npy_value = npy_value
self._npy_value.flags.writeable = False
return self._npy_value
# TODO(b/273265390): ideally we would write this as a decorator on the ArrayImpl
# class, however this triggers a pytype bug. Workaround: apply the decorator
# after the fact.
if not TYPE_CHECKING:
ArrayImpl = use_cpp_class(xc.ArrayImpl)(ArrayImpl)
def _get_shape_from_index(slc: Index, shape: Shape) -> Shape:
return tuple(
(s.stop or dim) - (s.start or 0)
for s, dim in safe_zip(slc, shape)
if isinstance(s, slice) # If element is int, this dimension is reduced
)
def _get_and_check_dtype(
arrays: Sequence[basearray.Array | np.ndarray | literals.TypedNdArray],
dtype: DTypeLike | ExtendedDType | None,
fname: str,
):
if dtype is None:
if arrays:
dtype = arrays[0].dtype
else:
raise ValueError(
"If the Array has no addressable shards, `dtype` must be provided "
f"via the `dtype` argument to `jax.{fname}`.")
else:
dtype = dtypes.check_and_canonicalize_user_dtype(dtype, fname)
if arrays and arrays[0].dtype != dtype:
raise ValueError(
f"If `dtype` is provided to `jax.{fname}`, it must match the dtype "
f"of the addressable shards. Got dtype={dtype} and shard "
f"dtype={arrays[0].dtype}`.")
return dtype
# explicitly set to be unhashable.
setattr(ArrayImpl, "__hash__", None)
setattr(ArrayImpl, "__array_priority__", 100)
# TODO(yashkatariya): Remove None from callback input type.
def make_array_from_callback(
shape: Shape, sharding: Sharding | Format,
data_callback: Callable[[Index | None], ArrayLike],
dtype: DTypeLike | None = None) -> ArrayImpl:
# pyformat: disable
"""Returns a ``jax.Array`` via data fetched from ``data_callback``.
``data_callback`` is used to fetch the data for each addressable shard of the
returned ``jax.Array``. This function must return concrete arrays, meaning that
``make_array_from_callback`` has limited compatibility with JAX transformations
like :func:`jit` or :func:`vmap`.
Args:
shape : Shape of the ``jax.Array``.
sharding: A ``Sharding`` instance which describes how the ``jax.Array`` is
laid out across devices.
data_callback : Callback that takes indices into the global array value as
input and returns the corresponding data of the global array value.
The data can be returned as any array-like object, e.g. a ``numpy.ndarray``.
dtype: The dtype of the output ``jax.Array``. If not provided, the dtype of
the data for the first addressable shard is used. If there are no
addressable shards, the ``dtype`` argument must be provided.
Returns:
A ``jax.Array`` via data fetched from ``data_callback``.
Examples:
>>> import math
>>> from jax.sharding import Mesh
>>> from jax.sharding import PartitionSpec as P
>>> import numpy as np
...
>>> input_shape = (8, 8)
>>> global_input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
>>> global_mesh = Mesh(np.array(jax.devices()).reshape(2, 4), ('x', 'y'))
>>> inp_sharding = jax.sharding.NamedSharding(global_mesh, P('x', 'y'))
...
>>> def cb(index):
... return global_input_data[index]
...
>>> arr = jax.make_array_from_callback(input_shape, inp_sharding, cb)
>>> arr.addressable_data(0).shape
(4, 2)
"""
# pyformat: enable
dll = sharding.layout if isinstance(sharding, Format) else None
if isinstance(dll, AutoLayout):
raise TypeError(
"`Layout.AUTO` cannot be used in place of a device-local"
f" layout when calling `jax.make_array_from_callback`. Got {sharding}")
sharding = sharding.sharding if isinstance(sharding, Format) else sharding
if not isinstance(sharding, Sharding):
raise TypeError(
f"sharding should be an instance of `jax.sharding`. Got {sharding} of"
f" type {type(sharding)}")
def get_data(
index: Index | None,
) -> ArrayImpl | literals.TypedNdArray | np.ndarray:
# Perhaps cache on index here, then we can unify fully_replicated
# and non-fully_replicated cases below and become faster for
# partially replicated cases.
assert index is not None
r = data_callback(index)
if isinstance(r, core.Tracer):
raise errors.UnexpectedTracerError(
"jax.make_array_from_callback cannot be called within a traced"
" context."
)
# Value can be python scalars, resolve it into something with dtype.
r = dtypes.canonicalize_value(r)
if isinstance(r, (literals.TypedInt, literals.TypedFloat,
literals.TypedComplex)):
r = literals.TypedNdArray(np.asarray(r, dtype=r.dtype), weak_type=False)
elif isinstance(r, bool):
r = literals.TypedNdArray(np.asarray(r, dtype=np.bool_), weak_type=False)
return r
if sharding.is_fully_replicated:
devices = list(sharding._internal_device_list.addressable_device_list) # type: ignore
# Only compute data once.
per_device_values = [get_data((slice(None),) * len(shape))] * len(devices)
else:
device_to_index_map = sharding.addressable_devices_indices_map(shape)
devices = list(device_to_index_map.keys())
per_device_values = [
get_data(device_to_index_map[device]) for device in devices
]
dtype = _get_and_check_dtype(
per_device_values, dtype, "make_array_from_callback")
expected_shape = sharding.shard_shape(shape)
aval = core.update_aval_with_sharding(
core.ShapedArray(shape, dtype), sharding)
_validate_shape_and_dtype_for_per_device_arrays(
per_device_values,
expected_shape=expected_shape,
aval=aval,
sharding=sharding,
)
first_value = None
if per_device_values:
first_value = per_device_values[0]
if (isinstance(first_value, ArrayImpl)
and first_value._committed
and sharding.is_fully_replicated
and first_value.is_fully_replicated
and first_value.sharding._device_assignment == tuple(devices)
and first_value.format.layout == dll):
return first_value
if dtypes.issubdtype(aval.dtype, dtypes.extended):
# TODO(yashkatariya): Can this also use batched_device_put?
arrays = api.device_put(per_device_values, devices)
return aval.dtype._rules.make_sharded_array(
aval, sharding, arrays, committed=True
)
if dll is not None:
devices = [Format(dll, SingleDeviceSharding(d)) for d in devices] # type: ignore
# pxla.batched_device_put doesn't support Layout... Take the slow route
arrays = api.device_put(per_device_values, devices)
return ArrayImpl(aval, sharding, arrays, committed=True)
if isinstance(first_value, ArrayImpl) and len(first_value.devices()) > 1:
# The output of the callback is already a sharded array, move it to
# to target device.
per_device_values = api.device_put(per_device_values, devices)
return pxla.batched_device_put(aval, sharding, per_device_values, devices)
def make_array_from_process_local_data(
sharding, # PyTree[jax.sharding.Sharding]
local_data, # PyTree[np.ndarray]
global_shape=None): # PyTree[Shape]
# pyformat: disable
"""Creates distributed tensor using the data available in process.
This function is a common special case of `make_array_from_callback`. It
assumes that the data is available in the process and takes care of the
index wrangling.
The most common case is when the sharding is sharded across the batch
dimension and each host just loads its corresponding sub-batch. This function
supports more general cases as well, such as mixed multi-host and multi-axis
replication and sharding but you would need to compute the size and the
contents of process-local data correctly to satisfy the sharding constraints.
In particular, if any two hosts are replicas, host_local_data should be
identical as well.
The global_shape is optional. If not provided it will be be inferred from
the local_data and sharding, under the assumption that
each host represents only their own data for uniform sharding. If sharding
is non-uniform, (see note below) an exception will be raised.
Setting global_shape explicitly allows for finer grain control and works with
non-uniform shardings. Each dimension of global_shape must either match
host_local_data, or match the inferred global shape of the sharding (in which
case it is equivalent to setting it to None, but is more explicit).
For example if dimension `i` is fully sharded then this size would be
`per_device_shape[i] * jax.local_device_count()`. Each device will be mapped
into local slice of `local_data` array. For example, if given process
addresses slices (8, 12) and (24, 28), then these slices will be mapped
into (0, 4) and (4, 8) of the `local_data`.
For each dimension where global_shapes matches local_shape, each device
will lookup the slice in the local_data. For example if
global_shape == local_data.shape, the local data is assumed to be the
actual target array that will be sharded into device.
If global_shape is the same as local_data.shape, then the data must
be the same across all hosts.
Examples:
>>> from jax.sharding import PartitionSpec as P
>>> mesh_rows = 2
>>> mesh_cols = jax.device_count() // 2
...
>>> mesh = jax.sharding.Mesh(np.array(jax.devices()).reshape(mesh_rows, mesh_cols), ('x', 'y'))
>>> sharding = jax.sharding.NamedSharding(mesh, P(('x', 'y'),))
>>> rows_per_device = 2
>>> feature_length = 32
>>> per_device_shape = (rows_per_device, feature_length)
>>> per_host_shape = (rows_per_device * len(mesh.local_devices), feature_length)
>>> per_host_generator = lambda : np.arange(np.prod(per_host_shape)).reshape(per_host_shape)
>>> per_host_data = per_host_generator() # replace with your own per-host data pipeline that outputs numpy arrays
>>> global_shape = (rows_per_device * len(sharding.device_set), ) + per_device_shape[1:]
>>> output_global_array = jax.make_array_from_process_local_data(sharding, per_host_data, global_shape)
...
>>> assert output_global_array.addressable_data(0).shape == per_device_shape
>>> assert output_global_array.shape == global_shape
NB: While most shardings are uniform, It is possible to design am exotic
sharding mesh where each process's devices will be arranged in a non-grid
like pattern in some dimensions, or for indices to overlap non-trivially.
Such sharding is called "non-uniform" in those dimensions. In that case,
the global shape along those directions must match local shape as there is
no meaningful way to represent all needed
per-process data in non-overlapping fashion. For example for global_shape 4x4
if sharding looks like this:
0123
2103
4675
4567
with 4 processes, containing devices (0,1), (2, 3), (4, 5), (6, 7) respectively.
Then the data for each host look like
xx.. ..xx .... ....
.xx. x..x .... ....
.... .... x..x .xx.
.... .... xx.. ..xx
the sharding is uniform on rows (each host requires either rows 1-2, or rows 3-4)
and non-uniform on columns (hosts require overlapping but not matching
set of columns). Thus local data must have the shape 2x4 or 4x4
for all hosts, even though each host can potentially fit into 2x2 shape.
In this case user must provide global_shape explicitly and for
local_shape=(2, 4), potentially valid global shapes are (2, 4) and (4, 4).
On the other hand for sharding:
0213 x.x. .x.x. .... ....
0213 x.x. .x.x. .... ....
4657 .... .... .x.x x.x.
4657 .... .... .x.x x.x.
for local_shape=(2, 2) this function can accept a choice of 2x2, 2x4, 4x2
and 4x4 global shapes. Setting global_shape to None, is equivalent to
setting it to (4, 4) in this case.
Args:
sharding: Sharding of the global array.
local_data: Data on the host to be placed on local devices. Each
dimension should either match global_shape, or match
num_addressable_indices(dim).
global_shape: The target shape of the global array. If None,
will infer from local_data and sharding.
Returns:
Tensor that will have sharding=sharding and of shape global_shape.
"""
# pyformat: enable
local_data_flat, treedef = tree_flatten(local_data)
sharding_flat = broadcast_prefix(sharding, local_data)
sharding_flat = map(
partial(api.pspec_to_sharding, 'make_array_from_process_local_data'),
sharding_flat)
global_shape_flat = broadcast_prefix(
global_shape, local_data,
is_leaf=lambda x: x is None or isinstance(x, tuple))
if xla_bridge.process_count() == 1:
# Safety check if the provided data doesn't match expected global_shape
for s, d in zip(global_shape_flat, local_data_flat):
if s is not None and s != d.shape:
raise ValueError(
"When calling `make_array_from_process_local_data` on a single"
" process, global_shape should be None or equal to"
f" local_data.shape.Got global_shape={s} and"
f" local_data.shape={d.shape}."
)
return api.device_put(local_data, sharding)
out = [_array_from_process_local_data(data, s, shape)
for data, s, shape in zip(local_data_flat, sharding_flat, global_shape_flat)]
return tree_unflatten(treedef, out)
def _array_from_process_local_data(
local_data: np.ndarray, sharding: Sharding,
global_shape: Shape | None = None) -> ArrayImpl:
# TODO(sandler): consider supporting partially specified global_shape or
# making local_to_global_shape available in the api.
local_shape = local_data.shape
if global_shape is None:
global_shape = local_to_global_shape(sharding, local_shape) # type: ignore[assignment]
assert global_shape is not None
if None in global_shape:
raise ValueError(
"Unable to compute global_shape due to non-uniform sharding."
f" Specify global shape directly. Partially computed {global_shape=}."
)
elif None in global_shape:
raise ValueError(f"{global_shape=} has Nones. This is not supported.")
full_dim = []
for i, (data_dim, global_dim) in enumerate(
zip(local_data.shape, global_shape)
):
full_dim.append(data_dim == global_dim)
if data_dim != global_dim:
process_slice = num_addressable_indices(sharding, i, global_shape)
if process_slice != data_dim:
raise ValueError(
"Invalid host data, each dimension should match either global or "
f"process shape. In dimension {i}, the process data has {data_dim} "
f"elements. Process addresses {process_slice} elements and "
f"{global_shape=}."
)
addressable_shards = sharding.addressable_devices_indices_map(global_shape)
shard = next(iter(addressable_shards.values()))
assert shard is not None
shard_shape = _get_shape_from_index(shard, global_shape)
slices_for_each_dim: list[list[int]] = [[] for _ in global_shape]
for shard_index in addressable_shards.values():
assert shard_index is not None
for i, slc in enumerate(shard_index):
slices_for_each_dim[i].append(slc.start or 0)
for i in range(len(global_shape)):
slices_for_each_dim[i] = sorted(set(slices_for_each_dim[i]))
@functools.lru_cache(maxsize=4096)
def local_slice(i, start):
# Looks up the index of this slice in the list of slices for this dimension.
# This will determine the slice in host_local_data
start = slices_for_each_dim[i].index(start or 0) * shard_shape[i]
end = start + shard_shape[i]
return slice(start, end)
def cb(index: Index | None) -> ArrayLike:
assert index is not None
data_slice = (
slc if full_dim[i] else local_slice(i, slc.start)
for i, slc in enumerate(index)
)
return local_data[tuple(data_slice)]
return make_array_from_callback(global_shape, sharding, cb)
def make_array_from_single_device_arrays(
shape: Shape, sharding: Sharding, arrays: Sequence[basearray.Array], *,
dtype: DTypeLike | None = None,
) -> ArrayImpl:
r"""Returns a ``jax.Array`` from a sequence of ``jax.Array``\s each on a single device.
Every device in input ``sharding``\'s mesh must have an array in ``arrays``\s.
Args:
shape : Shape of the output ``jax.Array``. This conveys information already included with
``sharding`` and ``arrays`` and serves as a double check.
sharding: Sharding: A global Sharding instance which describes how the output jax.Array is laid out across devices.
arrays: `list` or `tuple` of ``jax.Array``\s that are each single device addressable. ``len(arrays)``
must equal ``len(sharding.addressable_devices)`` and the shape of each array must be the same. For multiprocess code,
each process will call with a different ``arrays`` argument that corresponds to that processes' data.
These arrays are commonly created via ``jax.device_put``.
dtype: The dtype of the output ``jax.Array``. If not provided, the dtype of the first array in
``arrays`` is used. If ``arrays`` is empty, the ``dtype`` argument must be provided.
Returns:
A global ``jax.Array``, sharded as ``sharding``, with shape equal to ``shape``, and with per-device
contents matching ``arrays``.
Examples:
>>> import math
>>> from jax.sharding import Mesh
>>> from jax.sharding import PartitionSpec as P
>>> import numpy as np
...
>>> mesh_rows = 2
>>> mesh_cols = jax.device_count() // 2
...
>>> global_shape = (8, 8)
>>> mesh = Mesh(np.array(jax.devices()).reshape(mesh_rows, mesh_cols), ('x', 'y'))
>>> sharding = jax.sharding.NamedSharding(mesh, P('x', 'y'))
>>> inp_data = np.arange(math.prod(global_shape)).reshape(global_shape)
...
>>> arrays = [
... jax.device_put(inp_data[index], d)
... for d, index in sharding.addressable_devices_indices_map(global_shape).items()]
...
>>> arr = jax.make_array_from_single_device_arrays(global_shape, sharding, arrays)
>>> assert arr.shape == (8,8) # arr.shape is (8,8) regardless of jax.device_count()
For cases where you have a local array and want to convert it to a global
jax.Array, use ``jax.make_array_from_process_local_data``.
"""
if isinstance(arrays, Sequence):
dtype = _get_and_check_dtype(
arrays, dtype, "make_array_from_single_device_arrays")
# All input arrays should be committed. Checking it is expensive on
# single-controller systems.
aval = core.update_aval_with_sharding(
core.ShapedArray(shape, dtype, weak_type=False), sharding)
if dtypes.issubdtype(aval.dtype, dtypes.extended):
return aval.dtype._rules.make_sharded_array(aval, sharding, arrays,
committed=True)
arrays = list(arrays) if isinstance(arrays, tuple) else arrays
# TODO(phawkins): ideally the cast() could be checked.
try:
return ArrayImpl(aval, sharding, cast(Sequence[ArrayImpl], arrays),
committed=True)
except TypeError:
if not isinstance(arrays, list):
raise TypeError("jax.make_array_from_single_device_arrays `arrays` "
"argument must be a list or tuple, but got "
f"{type(arrays)}.")
if any(isinstance(arr, core.Tracer) for arr in arrays):
raise ValueError(
"jax.make_array_from_single_device_arrays requires a list of concrete"
f" arrays as input, but got types {set(map(type, arrays))}")
raise
dtypes.canonicalize_value_handlers[ArrayImpl] = lambda x: x
def _get_aval_array(self):
return core.update_aval_with_sharding(self.aval, self.sharding)
core.pytype_aval_mappings[ArrayImpl] = _get_aval_array
def _array_mlir_constant_handler(val, aval):
try:
return mlir.ir_constant(val._value)
except RuntimeError as e:
# TODO(yashkatariya): Ideally we would catch a custom exception from
# `_value` function in ArrayImpl instead of checking the error string.
if 'Fetching value for `jax.Array` that spans non-addressable' in str(e):
raise RuntimeError(
"Closing over jax.Array that spans non-addressable (non process"
" local) devices is not allowed. Please pass such arrays as arguments"
f" to the function. Got jax.Array: {val.aval.str_short()}") from e
raise
mlir.register_constant_handler(ArrayImpl, _array_mlir_constant_handler)
if config.use_simplified_jaxpr_constants.value:
core.literalable_types.add(ArrayImpl)
# NOTE(skye): we could refactor to generate _multi_slice parameters directly
# from the input ShardingSpec, rather than the indices. However, this would
# require duplicating the ordering logic of spec_to_indices, which is more
# subtle and more likely to change than the index logic we have to support here.
def as_slice_indices(arr: Any, idx: Index) -> tuple[
tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
"""Returns start_indices, limit_indices, removed_dims"""
start_indices = [0] * arr.ndim
limit_indices = list(arr.shape)
removed_dims: list[int] = []
tuple_idx = idx if isinstance(idx, tuple) else (idx,)
for dim, sub_idx in enumerate(tuple_idx):
if isinstance(sub_idx, int):
start_indices[dim] = sub_idx
limit_indices[dim] = sub_idx + 1
removed_dims.append(dim)
elif sub_idx == slice(None):
continue
else:
assert isinstance(sub_idx, slice), sub_idx
assert isinstance(sub_idx.start, int), sub_idx
assert isinstance(sub_idx.stop, int), sub_idx
start_indices[dim] = sub_idx.start
limit_indices[dim] = sub_idx.stop
return tuple(start_indices), tuple(limit_indices), tuple(removed_dims)
def shard_device_array(x, devices, indices, sharding):
start_indices, limit_indices, removed_dims = unzip3(
as_slice_indices(x, idx) for idx in indices)
if sharding.is_fully_replicated:
shards = [x] * len(devices)
else:
# TODO(yashkatariya): Maybe this should be set when we call the handler in
# InputsHandler.__call__?
with _internal_use_concrete_mesh(empty_concrete_mesh):
shards = x._multi_slice(start_indices, limit_indices, removed_dims)
aval = core.shaped_abstractify(x)
return pxla.batched_device_put(aval, sharding, shards, devices)
def shard_sharded_device_array_slow_path(x, devices, indices, sharding):
candidates = defaultdict(list)
bufs = [buf.data for buf in x.addressable_shards]
arr_indices = tuple(x.sharding.devices_indices_map(x.shape).values())
for buf, idx in safe_zip(bufs, arr_indices):
candidates[hashed_index(idx)].append(buf)
bufs = []
for idx, device in safe_zip(indices, devices):
# Look up all buffers that contain the correct slice of the logical array.
candidates_list = candidates[hashed_index(idx)]
if not candidates_list:
return pxla.shard_args([sharding], [None],
[xc.ArrayCopySemantics.REUSE_INPUT], [x._value],
canonicalize=False)[0]
# Try to find a candidate buffer already on the correct device,
# otherwise copy one of them.
for buf in candidates_list:
if buf.devices() == {device}:
bufs.append(buf)
break
else:
bufs.append(candidates_list[-1])
return pxla.batched_device_put(x.aval, sharding, bufs, devices)
@cache(max_size=4096, trace_context_in_key=False)
def _fallback_check_via_indices(src_sharding, dst_sharding, shape):
src_indices = src_sharding.addressable_devices_indices_map(shape).values()
dst_indices = dst_sharding.addressable_devices_indices_map(shape).values()
return tuple(src_indices) == tuple(dst_indices)
@cache(max_size=4096, trace_context_in_key=False)
def _sharding_indices_and_eq(src_sharding, dst_sharding, ndim):
hlos_eq = are_hlo_shardings_equal(src_sharding._to_xla_hlo_sharding(ndim),
dst_sharding._to_xla_hlo_sharding(ndim))
len_eq = (len(src_sharding._internal_device_list.addressable_device_list) ==
len(dst_sharding._internal_device_list.addressable_device_list))
return hlos_eq and len_eq
def _array_shard_arg(xs, shardings, layouts, copy_semantics):
util.test_event("_array_shard_arg")
results = []
batch_xs, batch_devs, batch_shardings, batch_indices = [], [], [], []
batch_cs = []
for i, (x, sharding, layout, cs) in enumerate(
safe_zip(xs, shardings, layouts, copy_semantics)):
x._check_if_deleted()
try:
same_sharding = _sharding_indices_and_eq(x.sharding, sharding, len(x.shape))
except NotImplementedError:
same_sharding = _fallback_check_via_indices(x.sharding, sharding, x.shape)
same_layout = True if layout is None else x.format.layout == layout
if not x.is_fully_addressable:
if same_sharding and same_layout:
results.append(x)
else:
raise NotImplementedError(
"Cannot reshard an input that is not fully addressable")
else:
devices = sharding._internal_device_list.addressable_device_list
if same_sharding and same_layout:
# Add a placeholder result that will be filled in later.
results.append(None)
# Accumulate arguments to `batched_copy_array_to_devices_with_sharding`.
batch_xs.append(x)
batch_devs.append(devices)
batch_shardings.append(sharding)
batch_indices.append(i)
batch_cs.append(cs)
# Resharding starts here:
elif not same_layout:
results.append(api.device_put(x, Format(layout, sharding)))
else:
indices = sharding.addressable_devices_indices_map(x.shape).values()
if dispatch.is_single_device_sharding(x.sharding):
results.append(shard_device_array(x, devices, indices, sharding))
else:
results.append(
shard_sharded_device_array_slow_path(x, devices, indices, sharding))
util.test_event("batched_copy_array")
copy_outs = xc.batched_copy_array_to_devices_with_sharding(
batch_xs, batch_devs, batch_shardings, batch_cs)
for i, copy_out in safe_zip(batch_indices, copy_outs):
assert results[i] is None
results[i] = copy_out
return results
pxla.shard_arg_handlers[ArrayImpl] = _array_shard_arg
def _array_global_result_handler(global_aval, out_sharding, committed):
global_aval = core.update_aval_with_sharding(global_aval, out_sharding)
if global_aval.dtype == dtypes.float0:
return lambda _: np.zeros(global_aval.shape, dtypes.float0)
if dtypes.issubdtype(global_aval.dtype, dtypes.extended):
return global_aval.dtype._rules.global_sharded_result_handler(
global_aval, out_sharding, committed)
return xc.array_result_handler(
global_aval, out_sharding, committed=committed, _skip_checks=True
)
pxla.global_result_handlers[core.ShapedArray] = _array_global_result_handler
# Only used for Arrays that come out of pmap.
def _array_local_result_handler(aval, sharding, indices):
if aval.dtype == dtypes.float0:
return lambda _: np.zeros(aval.shape, dtypes.float0)
if dtypes.issubdtype(aval.dtype, dtypes.extended):
return aval.dtype._rules.local_sharded_result_handler(
aval, sharding, indices)
return xc.array_result_handler(
aval, sharding, committed=True, _skip_checks=True
)
pxla.local_result_handlers[core.ShapedArray] = _array_local_result_handler
# Token handlers
def _token_shard_arg(xs, shardings, layouts, copy_semantics):
results = []
for x, sharding, layout in safe_zip(xs, shardings, layouts):
assert layout is None
x.block_until_ready()
x = np.array([], dtype=bool)
aval = core.get_aval(x)
devices = sharding._addressable_device_assignment
results.append(pxla.batched_device_put(
aval, sharding, [x] * len(devices), devices))
return results
pxla.shard_arg_handlers[core.Token] = _token_shard_arg
def _token_global_result_handler(global_aval, out_sharding, committed):
array_handler = _array_global_result_handler(
core.get_token_aval(), out_sharding, committed)
def wrapper(*args, **kwargs):
out_buf = array_handler(*args, **kwargs)
return core.Token(out_buf)
return wrapper
pxla.global_result_handlers[core.AbstractToken] = _token_global_result_handler
| ArrayImpl |
python | ethereum__web3.py | web3/types.py | {
"start": 13017,
"end": 13134
} | class ____(TypedDict, total=False):
balance: int
nonce: int
code: str
storage: dict[str, str]
| TraceData |
python | astropy__astropy | astropy/io/votable/tests/test_vo.py | {
"start": 6609,
"end": 7086
} | class ____:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename("data/regression.xml")
).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array["string_test_2"], self.array["fixed string test"])
| TestFixups |
python | django__django | tests/gis_tests/layermap/tests.py | {
"start": 18055,
"end": 18358
} | class ____(TestCase):
databases = {"default", "other"}
@unittest.skipUnless(len(settings.DATABASES) > 1, "multiple databases required")
def test_layermapping_default_db(self):
lm = LayerMapping(City, city_shp, city_mapping)
self.assertEqual(lm.using, "other")
| LayerMapRouterTest |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/metadata/__init__.py | {
"start": 1581,
"end": 4339
} | class ____(Protocol):
NAME: 'Literal["importlib", "pkg_resources"]'
Distribution: Type[BaseDistribution]
Environment: Type[BaseEnvironment]
@functools.lru_cache(maxsize=None)
def select_backend() -> Backend:
if _should_use_importlib_metadata():
from . import importlib
return cast(Backend, importlib)
from . import pkg_resources
return cast(Backend, pkg_resources)
def get_default_environment() -> BaseEnvironment:
"""Get the default representation for the current environment.
This returns an Environment instance from the chosen backend. The default
Environment instance should be built from ``sys.path`` and may use caching
to share instance state accorss calls.
"""
return select_backend().Environment.default()
def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
"""Get a representation of the environment specified by ``paths``.
This returns an Environment instance from the chosen backend based on the
given import paths. The backend must build a fresh instance representing
the state of installed distributions when this function is called.
"""
return select_backend().Environment.from_paths(paths)
def get_directory_distribution(directory: str) -> BaseDistribution:
"""Get the distribution metadata representation in the specified directory.
This returns a Distribution instance from the chosen backend based on
the given on-disk ``.dist-info`` directory.
"""
return select_backend().Distribution.from_directory(directory)
def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution:
"""Get the representation of the specified wheel's distribution metadata.
This returns a Distribution instance from the chosen backend based on
the given wheel's ``.dist-info`` directory.
:param canonical_name: Normalized project name of the given wheel.
"""
return select_backend().Distribution.from_wheel(wheel, canonical_name)
def get_metadata_distribution(
metadata_contents: bytes,
filename: str,
canonical_name: str,
) -> BaseDistribution:
"""Get the dist representation of the specified METADATA file contents.
This returns a Distribution instance from the chosen backend sourced from the data
in `metadata_contents`.
:param metadata_contents: Contents of a METADATA file within a dist, or one served
via PEP 658.
:param filename: Filename for the dist this metadata represents.
:param canonical_name: Normalized project name of the given dist.
"""
return select_backend().Distribution.from_metadata_file_contents(
metadata_contents,
filename,
canonical_name,
)
| Backend |
python | openai__openai-python | tests/test_legacy_response.py | {
"start": 1938,
"end": 3708
} | class ____(BaseModel):
foo: str
bar: int
def test_response_parse_custom_model(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(to=CustomModel)
assert obj.foo == "hello!"
assert obj.bar == 2
def test_response_basemodel_request_id(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(
200,
headers={"x-request-id": "my-req-id"},
content=json.dumps({"foo": "hello!", "bar": 2}),
),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(to=CustomModel)
assert obj._request_id == "my-req-id"
assert obj.foo == "hello!"
assert obj.bar == 2
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
assert "_request_id" not in rich_print_str(obj)
assert "__exclude_fields__" not in rich_print_str(obj)
def test_response_parse_annotated_type(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(
to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]),
)
assert obj.foo == "hello!"
assert obj.bar == 2
| CustomModel |
python | numba__llvmlite | llvmlite/binding/targets.py | {
"start": 1672,
"end": 4926
} | class ____(dict):
"""
Maps feature name to a boolean indicating the availability of the feature.
Extends ``dict`` to add `.flatten()` method.
"""
def flatten(self, sort=True):
"""
Args
----
sort: bool
Optional. If True, the features are sorted by name; otherwise,
the ordering is unstable between python session due to hash
randomization. Defaults to True.
Returns a string suitable for use as the ``features`` argument to
``Target.create_target_machine()``.
"""
iterator = sorted(self.items()) if sort else iter(self.items())
flag_map = {True: '+', False: '-'}
return ','.join('{0}{1}'.format(flag_map[v], k)
for k, v in iterator)
def get_host_cpu_features():
"""
Returns a dictionary-like object indicating the CPU features for current
architecture and whether they are enabled for this CPU. The key-value pairs
are the feature name as string and a boolean indicating whether the feature
is available. The returned value is an instance of ``FeatureMap`` class,
which adds a new method ``.flatten()`` for returning a string suitable for
use as the "features" argument to ``Target.create_target_machine()``.
If LLVM has not implemented this feature or it fails to get the information,
this function will raise a RuntimeError exception.
"""
with ffi.OutputString() as out:
outdict = FeatureMap()
if not ffi.lib.LLVMPY_GetHostCPUFeatures(out):
return outdict
flag_map = {'+': True, '-': False}
content = str(out)
if content: # protect against empty string
for feat in content.split(','):
if feat: # protect against empty feature
outdict[feat[1:]] = flag_map[feat[0]]
return outdict
def get_default_triple():
"""
Return the default target triple LLVM is configured to produce code for.
"""
with ffi.OutputString() as out:
ffi.lib.LLVMPY_GetDefaultTargetTriple(out)
return str(out)
def get_host_cpu_name():
"""
Get the name of the host's CPU, suitable for using with
:meth:`Target.create_target_machine()`.
"""
with ffi.OutputString() as out:
ffi.lib.LLVMPY_GetHostCPUName(out)
return str(out)
# Adapted from https://github.com/llvm/llvm-project/blob/release/15.x/llvm/include/llvm/ADT/Triple.h#L269 # noqa
llvm_version_major = llvm_version_info[0]
_object_formats = {
0: "Unknown",
1: "COFF",
2: "DXContainer",
3: "ELF",
4: "GOFF",
5: "MachO",
6: "SPIRV",
7: "Wasm",
8: "XCOFF",
}
def get_object_format(triple=None):
"""
Get the object format for the given *triple* string (or the default
triple if omitted).
A string is returned
"""
if triple is None:
triple = get_default_triple()
res = ffi.lib.LLVMPY_GetTripleObjectFormat(_encode_string(triple))
return _object_formats[res]
def create_target_data(layout):
"""
Create a TargetData instance for the given *layout* string.
"""
return TargetData(ffi.lib.LLVMPY_CreateTargetData(_encode_string(layout)))
| FeatureMap |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0050_migrate_external_builds.py | {
"start": 531,
"end": 759
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0049_add_external_build_enabled"),
]
operations = [
migrations.RunPython(migrate_features),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver.py | {
"start": 1767,
"end": 3315
} | class ____(object):
"""Dummy local Cloud TPU client."""
def api_available(self):
return False
_TPU_DEVICE_REGEX = re.compile(
r'.*task:(?P<host_id>\d+)/.*device:TPU:(?P<core_id>\d+)$')
_TPU_CONN_RETRIES = 120
DeviceDetails = collections.namedtuple(
'DeviceDetails', ['device_map', 'total_cores'])
def initialize_tpu_system(cluster_resolver=None):
"""Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster. If called
inside tf.function, it returns the serialized topology object instead.
Raises:
RuntimeError: If running inside a tf.function.
NotFoundError: If no TPU devices found in eager mode.
"""
return tpu_strategy_util.initialize_tpu_system_impl(
cluster_resolver, TPUClusterResolver)
def shutdown_tpu_system(cluster_resolver=None):
"""Shuts down the TPU devices.
This will clear all caches, even those that are maintained through sequential
calls to tf.tpu.experimental.initialize_tpu_system, such as the compilation
cache.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Raises:
RuntimeError: If no TPU devices found for eager execution or if run in a
tf.function.
"""
tpu_strategy_util.shutdown_tpu_system_impl(
cluster_resolver, TPUClusterResolver)
| _LocalCloudTpuClient |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_self/SLF001.py | {
"start": 0,
"end": 249
} | class ____(type):
_private_count = 1
def __new__(mcs, name, bases, attrs):
if mcs._private_count <= 5:
mcs.some_method()
return super().__new__(mcs, name, bases, attrs)
def some_method():
pass
| BazMeta |
python | sphinx-doc__sphinx | sphinx/builders/latex/theming.py | {
"start": 2021,
"end": 3163
} | class ____(Theme):
"""A user defined LaTeX theme."""
REQUIRED_CONFIG_KEYS = ['docclass', 'wrapperclass']
OPTIONAL_CONFIG_KEYS = ['papersize', 'pointsize', 'toplevel_sectioning']
def __init__(self, name: str, filename: Path) -> None:
super().__init__(name)
self.config = configparser.RawConfigParser()
self.config.read(filename, encoding='utf-8')
for key in self.REQUIRED_CONFIG_KEYS:
try:
value = self.config.get('theme', key)
setattr(self, key, value)
except configparser.NoSectionError as exc:
msg = __('%r doesn\'t have "theme" setting') % filename
raise ThemeError(msg) from exc
except configparser.NoOptionError as exc:
msg = __('%r doesn\'t have "%s" setting') % (filename, exc.args[0])
raise ThemeError(msg) from exc
for key in self.OPTIONAL_CONFIG_KEYS:
try:
value = self.config.get('theme', key)
setattr(self, key, value)
except configparser.NoOptionError:
pass
| UserTheme |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/output/plain_text.py | {
"start": 338,
"end": 3296
} | class ____(Output):
"""
Output that won't include any ANSI escape sequences.
Useful when stdout is not a terminal. Maybe stdout is redirected to a file.
In this case, if `print_formatted_text` is used, for instance, we don't
want to include formatting.
(The code is mostly identical to `Vt100_Output`, but without the
formatting.)
"""
def __init__(self, stdout: TextIO) -> None:
assert all(hasattr(stdout, a) for a in ("write", "flush"))
self.stdout: TextIO = stdout
self._buffer: list[str] = []
def fileno(self) -> int:
"There is no sensible default for fileno()."
return self.stdout.fileno()
def encoding(self) -> str:
return "utf-8"
def write(self, data: str) -> None:
self._buffer.append(data)
def write_raw(self, data: str) -> None:
self._buffer.append(data)
def set_title(self, title: str) -> None:
pass
def clear_title(self) -> None:
pass
def flush(self) -> None:
if not self._buffer:
return
data = "".join(self._buffer)
self._buffer = []
flush_stdout(self.stdout, data)
def erase_screen(self) -> None:
pass
def enter_alternate_screen(self) -> None:
pass
def quit_alternate_screen(self) -> None:
pass
def enable_mouse_support(self) -> None:
pass
def disable_mouse_support(self) -> None:
pass
def erase_end_of_line(self) -> None:
pass
def erase_down(self) -> None:
pass
def reset_attributes(self) -> None:
pass
def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None:
pass
def disable_autowrap(self) -> None:
pass
def enable_autowrap(self) -> None:
pass
def cursor_goto(self, row: int = 0, column: int = 0) -> None:
pass
def cursor_up(self, amount: int) -> None:
pass
def cursor_down(self, amount: int) -> None:
self._buffer.append("\n")
def cursor_forward(self, amount: int) -> None:
self._buffer.append(" " * amount)
def cursor_backward(self, amount: int) -> None:
pass
def hide_cursor(self) -> None:
pass
def show_cursor(self) -> None:
pass
def set_cursor_shape(self, cursor_shape: CursorShape) -> None:
pass
def reset_cursor_shape(self) -> None:
pass
def ask_for_cpr(self) -> None:
pass
def bell(self) -> None:
pass
def enable_bracketed_paste(self) -> None:
pass
def disable_bracketed_paste(self) -> None:
pass
def scroll_buffer_to_prompt(self) -> None:
pass
def get_size(self) -> Size:
return Size(rows=40, columns=80)
def get_rows_below_cursor_position(self) -> int:
return 8
def get_default_color_depth(self) -> ColorDepth:
return ColorDepth.DEPTH_1_BIT
| PlainTextOutput |
python | modin-project__modin | modin/config/envvars.py | {
"start": 38006,
"end": 38164
} | class ____(EnvironmentVariable, type=bool):
"""Set to true when running Modin in GitHub CI."""
varname = "MODIN_GITHUB_CI"
default = False
| GithubCI |
python | walkccc__LeetCode | solutions/573. Squirrel Simulation/573.py | {
"start": 0,
"end": 431
} | class ____:
def minDistance(
self,
height: int,
width: int,
tree: list[int],
squirrel: list[int],
nuts: list[list[int]],
) -> int:
def dist(a: list[int], b: list[int]) -> int:
return abs(a[0] - b[0]) + abs(a[1] - b[1])
totDist = sum(dist(nut, tree) for nut in nuts) * 2
maxSave = max(dist(nut, tree) - dist(nut, squirrel) for nut in nuts)
return totDist - maxSave
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/skills/versions.py | {
"start": 23102,
"end": 23634
} | class ____:
def __init__(self, versions: Versions) -> None:
self._versions = versions
self.create = _legacy_response.to_raw_response_wrapper(
versions.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
versions.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
versions.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
versions.delete,
)
| VersionsWithRawResponse |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/asset_daemon_scenario.py | {
"start": 24122,
"end": 25112
} | class ____(NamedTuple):
"""Describes a scenario that the AssetDaemon should be tested against. Consists of an id
describing what is to be tested, an initial state, and a scenario function which will modify
that state and make assertions about it along the way.
"""
id: str
initial_spec: ScenarioSpec
execution_fn: Callable[[AssetDaemonScenarioState], AssetDaemonScenarioState]
def evaluate_fast(self) -> None:
self.execution_fn(AssetDaemonScenarioState(self.initial_spec))
def evaluate_daemon(
self, instance: DagsterInstance, sensor_name: Optional[str] = None, threadpool_executor=None
) -> "AssetDaemonScenarioState":
return self.execution_fn(
AssetDaemonScenarioState(
self.initial_spec,
instance=instance,
is_daemon=True,
sensor_name=sensor_name,
threadpool_executor=threadpool_executor,
)
)
| AssetDaemonScenario |
python | kubernetes-client__python | kubernetes/base/dynamic/discovery.py | {
"start": 9068,
"end": 13287
} | class ____(Discoverer):
""" A convenient container for storing discovered API resources. Allows
easy searching and retrieval of specific resources.
Resources for the cluster are loaded lazily.
"""
def __init__(self, client, cache_file):
Discoverer.__init__(self, client, cache_file)
self.__update_cache = False
def discover(self):
self.__resources = self.parse_api_groups(request_resources=False)
def __maybe_write_cache(self):
if self.__update_cache:
self._write_cache()
self.__update_cache = False
@property
def api_groups(self):
return self.parse_api_groups(request_resources=False, update=True)['apis'].keys()
def search(self, **kwargs):
# In first call, ignore ResourceNotFoundError and set default value for results
try:
results = self.__search(self.__build_search(**kwargs), self.__resources, [])
except ResourceNotFoundError:
results = []
if not results:
self.invalidate_cache()
results = self.__search(self.__build_search(**kwargs), self.__resources, [])
self.__maybe_write_cache()
return results
def __search(self, parts, resources, reqParams):
part = parts[0]
if part != '*':
resourcePart = resources.get(part)
if not resourcePart:
return []
elif isinstance(resourcePart, ResourceGroup):
if len(reqParams) != 2:
raise ValueError("prefix and group params should be present, have %s" % reqParams)
# Check if we've requested resources for this group
if not resourcePart.resources:
prefix, group, version = reqParams[0], reqParams[1], part
try:
resourcePart.resources = self.get_resources_for_api_version(
prefix, group, part, resourcePart.preferred)
except NotFoundError:
raise ResourceNotFoundError
self._cache['resources'][prefix][group][version] = resourcePart
self.__update_cache = True
return self.__search(parts[1:], resourcePart.resources, reqParams)
elif isinstance(resourcePart, dict):
# In this case parts [0] will be a specified prefix, group, version
# as we recurse
return self.__search(parts[1:], resourcePart, reqParams + [part] )
else:
if parts[1] != '*' and isinstance(parts[1], dict):
for _resource in resourcePart:
for term, value in parts[1].items():
if getattr(_resource, term) == value:
return [_resource]
return []
else:
return resourcePart
else:
matches = []
for key in resources.keys():
matches.extend(self.__search([key] + parts[1:], resources, reqParams))
return matches
def __build_search(self, prefix=None, group=None, api_version=None, kind=None, **kwargs):
if not group and api_version and '/' in api_version:
group, api_version = api_version.split('/')
items = [prefix, group, api_version, kind, kwargs]
return list(map(lambda x: x or '*', items))
def __iter__(self):
for prefix, groups in self.__resources.items():
for group, versions in groups.items():
for version, rg in versions.items():
# Request resources for this groupVersion if we haven't yet
if not rg.resources:
rg.resources = self.get_resources_for_api_version(
prefix, group, version, rg.preferred)
self._cache['resources'][prefix][group][version] = rg
self.__update_cache = True
for _, resource in six.iteritems(rg.resources):
yield resource
self.__maybe_write_cache()
| LazyDiscoverer |
python | walkccc__LeetCode | solutions/676. Implement Magic Dictionary/676.py | {
"start": 0,
"end": 593
} | class ____:
def __init__(self):
self.dict = {}
def buildDict(self, dictionary: list[str]) -> None:
for word in dictionary:
for i, c in enumerate(word):
replaced = self._getReplaced(word, i)
self.dict[replaced] = '*' if replaced in self.dict else c
def search(self, searchWord: str) -> bool:
for i, c in enumerate(searchWord):
replaced = self._getReplaced(searchWord, i)
if self.dict.get(replaced, c) != c:
return True
return False
def _getReplaced(self, s: str, i: int) -> str:
return s[:i] + '*' + s[i + 1:]
| MagicDictionary |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 3189,
"end": 3366
} | class ____(graphene.ObjectType):
dataset_summaries = graphene.List(graphene.NonNull(MlflowDatasetSummary))
apiError = graphene.Field(ApiError)
| MlflowSearchDatasetsResponse |
python | django__django | tests/serializers/models/base.py | {
"start": 2663,
"end": 2724
} | class ____(models.Model):
score = models.FloatField()
| Score |
python | google__pytype | pytype/pyi/metadata_test.py | {
"start": 1675,
"end": 2936
} | class ____(unittest.TestCase):
def test_deprecated(self):
pytd = metadata.to_pytd({'tag': 'Deprecated', 'reason': 'squished by duck'})
self.assertEqual(pytd, "Deprecated('squished by duck')")
def test_call_noargs(self):
pytd = metadata.to_pytd(
{'tag': 'call', 'fn': 'Quack', 'posargs': (), 'kwargs': {}}
)
self.assertEqual(pytd, 'Quack()')
def test_call_posargs(self):
pytd = metadata.to_pytd(
{'tag': 'call', 'fn': 'Quack', 'posargs': [2], 'kwargs': {}}
)
self.assertEqual(pytd, 'Quack(2)')
def test_call_kwargs(self):
pytd = metadata.to_pytd(
{'tag': 'call', 'fn': 'Quack', 'posargs': (), 'kwargs': {'volume': 4.5}}
)
self.assertEqual(pytd, 'Quack(volume=4.5)')
def test_call_allargs(self):
pytd = metadata.to_pytd({
'tag': 'call',
'fn': 'Quack',
'posargs': [2, 'brown'],
'kwargs': {'volume': 4.5, 'mode': 'correct'},
})
self.assertEqual(pytd, "Quack(2, 'brown', volume=4.5, mode='correct')")
def test_noncall_tag(self):
self.assertEqual(metadata.to_pytd({'tag': 'sneeze'}), "{'tag': 'sneeze'}")
def test_no_tag(self):
self.assertEqual(metadata.to_pytd({}), '{}')
if __name__ == '__main__':
unittest.main()
| ToPytdTest |
python | django__django | django/contrib/postgres/fields/ranges.py | {
"start": 10507,
"end": 10719
} | class ____(models.Transform):
lookup_name = "endswith"
function = "upper"
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
| RangeEndsWith |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 86917,
"end": 89811
} | class ____(metaclass=ABCMeta):
"""Abstract base class for linkers"""
@classmethod
def new(cls, max_registers=0, lineinfo=False, cc=None):
if config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY:
return MVCLinker(max_registers, lineinfo, cc)
elif USE_NV_BINDING:
return CudaPythonLinker(max_registers, lineinfo, cc)
else:
return CtypesLinker(max_registers, lineinfo, cc)
@abstractmethod
def __init__(self, max_registers, lineinfo, cc):
# LTO unsupported in Numba at present, but the pynvjitlink linker
# (https://github.com/rapidsai/pynvjitlink) supports it,
self.lto = False
@property
@abstractmethod
def info_log(self):
"""Return the info log from the linker invocation"""
@property
@abstractmethod
def error_log(self):
"""Return the error log from the linker invocation"""
@abstractmethod
def add_ptx(self, ptx, name):
"""Add PTX source in a string to the link"""
def add_cu(self, cu, name):
"""Add CUDA source in a string to the link. The name of the source
file should be specified in `name`."""
with driver.get_active_context() as ac:
dev = driver.get_device(ac.devnum)
cc = dev.compute_capability
ptx, log = nvrtc.compile(cu, name, cc)
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % name).center(80, '-'))
print(ptx)
print('=' * 80)
# Link the program's PTX using the normal linker mechanism
ptx_name = os.path.splitext(name)[0] + ".ptx"
self.add_ptx(ptx.encode(), ptx_name)
@abstractmethod
def add_file(self, path, kind):
"""Add code from a file to the link"""
def add_cu_file(self, path):
with open(path, 'rb') as f:
cu = f.read()
self.add_cu(cu, os.path.basename(path))
def add_file_guess_ext(self, path):
"""Add a file to the link, guessing its type from its extension."""
ext = os.path.splitext(path)[1][1:]
if ext == '':
raise RuntimeError("Don't know how to link file with no extension")
elif ext == 'cu':
self.add_cu_file(path)
else:
kind = FILE_EXTENSION_MAP.get(ext, None)
if kind is None:
raise RuntimeError("Don't know how to link file with extension "
f".{ext}")
self.add_file(path, kind)
@abstractmethod
def complete(self):
"""Complete the link. Returns (cubin, size)
cubin is a pointer to a internal buffer of cubin owned by the linker;
thus, it should be loaded before the linker is destroyed.
"""
_MVC_ERROR_MESSAGE = (
"Minor version compatibility requires ptxcompiler and cubinlinker packages "
"to be available"
)
| Linker |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_stats.py | {
"start": 200,
"end": 1346
} | class ____(APITestCase):
@freeze_time(before_now(days=1).replace(minute=10))
def test_simple(self) -> None:
self.login_as(user=self.user)
group1 = self.store_event(
data={
"fingerprint": ["group1"],
"timestamp": before_now(minutes=5).isoformat(),
},
project_id=self.project.id,
).group
assert group1 is not None
url = f"/api/0/issues/{group1.id}/stats/"
for fingerprint, count in (("group1", 2), ("group2", 5)):
for _ in range(count):
self.store_event(
data={
"fingerprint": [fingerprint],
"timestamp": before_now(minutes=5).isoformat(),
},
project_id=self.project.id,
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data[-1][1] == 3, response.data
for point in response.data[:-1]:
assert point[1] == 0
assert len(response.data) == 24
| GroupStatsTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 468038,
"end": 468702
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AddPullRequestReviewComment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "comment", "comment_edge")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
comment = sgqlc.types.Field("PullRequestReviewComment", graphql_name="comment")
"""The newly created comment."""
comment_edge = sgqlc.types.Field("PullRequestReviewCommentEdge", graphql_name="commentEdge")
"""The edge from the review's comment connection."""
| AddPullRequestReviewCommentPayload |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/linear_model.py | {
"start": 424,
"end": 1705
} | class ____(Predictor, Estimator, Benchmark):
"""
Benchmarks for LogisticRegression.
"""
param_names = ["representation", "solver", "n_jobs"]
params = (["dense", "sparse"], ["lbfgs", "saga"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver, n_jobs = params
if Benchmark.data_size == "large":
if representation == "sparse":
data = _20newsgroups_highdim_dataset(n_samples=10000)
else:
data = _20newsgroups_lowdim_dataset(n_components=1e3)
else:
if representation == "sparse":
data = _20newsgroups_highdim_dataset(n_samples=2500)
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, solver, n_jobs = params
l1_ratio = 0 if solver == "lbfgs" else 1
estimator = LogisticRegression(
solver=solver,
l1_ratio=l1_ratio,
tol=0.01,
n_jobs=n_jobs,
random_state=0,
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| LogisticRegressionBenchmark |
python | getsentry__sentry | tests/sentry/web/frontend/generic/test_static_media.py | {
"start": 306,
"end": 6343
} | class ____(TestCase):
@override_settings(DEBUG=False)
def test_basic(self) -> None:
url = "/_static/sentry/js/ads.js"
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == NEVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
assert "Content-Encoding" not in response
@override_settings(DEBUG=False)
def test_versioned(self) -> None:
url = "/_static/1234567890/sentry/js/ads.js"
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == FOREVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
assert "Content-Encoding" not in response
url = "/_static/a43db3b08ddd4918972f80739f15344b/sentry/js/ads.js"
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == FOREVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
assert "Content-Encoding" not in response
with override_settings(DEBUG=True):
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == NEVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
@override_settings(DEBUG=False)
def test_frontend_app_assets(self) -> None:
"""
static assets that do not have versioned filenames/paths
"""
# non-existant dist file
response = self.client.get("/_static/dist/sentry/invalid.js")
assert response.status_code == 404, response
dist_path = os.path.join("src", "sentry", "static", "sentry", "dist", "entrypoints")
os.makedirs(dist_path, exist_ok=True)
try:
with open(os.path.join(dist_path, "test.js"), "a"):
url = get_frontend_app_asset_url("sentry", "entrypoints/test.js")
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == NO_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
assert "Content-Encoding" not in response
with override_settings(DEBUG=True):
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == NEVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert response["Access-Control-Allow-Origin"] == "*"
finally:
try:
os.unlink(os.path.join(dist_path, "test.js"))
except Exception:
pass
@override_settings(DEBUG=False)
def test_no_cors(self) -> None:
url = "/_static/sentry/images/favicon.ico"
response = self.client.get(url)
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Cache-Control"] == NEVER_CACHE
assert response["Vary"] == "Accept-Encoding"
assert "Access-Control-Allow-Origin" not in response
assert "Content-Encoding" not in response
def test_404(self) -> None:
url = "/_static/sentry/app/thisfiledoesnotexistlol.js"
response = self.client.get(url)
assert response.status_code == 404, response
def test_gzip(self) -> None:
url = "/_static/sentry/js/ads.js"
response = self.client.get(url, HTTP_ACCEPT_ENCODING="gzip,deflate")
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Vary"] == "Accept-Encoding"
assert "Content-Encoding" not in response
try:
with open("src/sentry/static/sentry/js/ads.js.gz", "a"):
pass
# Not a gzip Accept-Encoding, so shouldn't serve gzipped file
response = self.client.get(url, HTTP_ACCEPT_ENCODING="lol")
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Vary"] == "Accept-Encoding"
assert "Content-Encoding" not in response
response = self.client.get(url, HTTP_ACCEPT_ENCODING="gzip,deflate")
close_streaming_response(response)
assert response.status_code == 200, response
assert response["Vary"] == "Accept-Encoding"
assert response["Content-Encoding"] == "gzip"
finally:
try:
os.unlink("src/sentry/static/sentry/js/ads.js.gz")
except Exception:
pass
def test_file_not_found(self) -> None:
url = "/_static/sentry/app/xxxxxxxxxxxxxxxxxxxxxxxx.js"
response = self.client.get(url)
assert response.status_code == 404, response
def test_bad_access(self) -> None:
url = "/_static/sentry/images/../../../../../etc/passwd"
response = self.client.get(url)
assert response.status_code == 404, response
def test_directory(self) -> None:
url = "/_static/sentry/images/"
response = self.client.get(url)
assert response.status_code == 404, response
url = "/_static/sentry/images"
response = self.client.get(url)
assert response.status_code == 404, response
| StaticMediaTest |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_cairo.py | {
"start": 13446,
"end": 18476
} | class ____(FigureCanvasBase):
@property
def _renderer(self):
# In theory, _renderer should be set in __init__, but GUI canvas
# subclasses (FigureCanvasFooCairo) don't always interact well with
# multiple inheritance (FigureCanvasFoo inits but doesn't super-init
# FigureCanvasCairo), so initialize it in the getter instead.
if not hasattr(self, "_cached_renderer"):
self._cached_renderer = RendererCairo(self.figure.dpi)
return self._cached_renderer
def get_renderer(self):
return self._renderer
def copy_from_bbox(self, bbox):
surface = self._renderer.gc.ctx.get_target()
if not isinstance(surface, cairo.ImageSurface):
raise RuntimeError(
"copy_from_bbox only works when rendering to an ImageSurface")
sw = surface.get_width()
sh = surface.get_height()
x0 = math.ceil(bbox.x0)
x1 = math.floor(bbox.x1)
y0 = math.ceil(sh - bbox.y1)
y1 = math.floor(sh - bbox.y0)
if not (0 <= x0 and x1 <= sw and bbox.x0 <= bbox.x1
and 0 <= y0 and y1 <= sh and bbox.y0 <= bbox.y1):
raise ValueError("Invalid bbox")
sls = slice(y0, y0 + max(y1 - y0, 0)), slice(x0, x0 + max(x1 - x0, 0))
data = (np.frombuffer(surface.get_data(), np.uint32)
.reshape((sh, sw))[sls].copy())
return _CairoRegion(sls, data)
def restore_region(self, region):
surface = self._renderer.gc.ctx.get_target()
if not isinstance(surface, cairo.ImageSurface):
raise RuntimeError(
"restore_region only works when rendering to an ImageSurface")
surface.flush()
sw = surface.get_width()
sh = surface.get_height()
sly, slx = region._slices
(np.frombuffer(surface.get_data(), np.uint32)
.reshape((sh, sw))[sly, slx]) = region._data
surface.mark_dirty_rectangle(
slx.start, sly.start, slx.stop - slx.start, sly.stop - sly.start)
def print_png(self, fobj):
self._get_printed_image_surface().write_to_png(fobj)
def print_rgba(self, fobj):
width, height = self.get_width_height()
buf = self._get_printed_image_surface().get_data()
fobj.write(cbook._premultiplied_argb32_to_unmultiplied_rgba8888(
np.asarray(buf).reshape((width, height, 4))))
print_raw = print_rgba
def _get_printed_image_surface(self):
self._renderer.dpi = self.figure.dpi
width, height = self.get_width_height()
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_context(cairo.Context(surface))
self.figure.draw(self._renderer)
return surface
def _save(self, fmt, fobj, *, orientation='portrait'):
# save PDF/PS/SVG
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (
height_in_points, width_in_points)
if fmt == 'ps':
if not hasattr(cairo, 'PSSurface'):
raise RuntimeError('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface(fobj, width_in_points, height_in_points)
elif fmt == 'pdf':
if not hasattr(cairo, 'PDFSurface'):
raise RuntimeError('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface(fobj, width_in_points, height_in_points)
elif fmt in ('svg', 'svgz'):
if not hasattr(cairo, 'SVGSurface'):
raise RuntimeError('cairo has not been compiled with SVG '
'support enabled')
if fmt == 'svgz':
if isinstance(fobj, str):
fobj = gzip.GzipFile(fobj, 'wb')
else:
fobj = gzip.GzipFile(None, 'wb', fileobj=fobj)
surface = cairo.SVGSurface(fobj, width_in_points, height_in_points)
else:
raise ValueError(f"Unknown format: {fmt!r}")
self._renderer.dpi = self.figure.dpi
self._renderer.set_context(cairo.Context(surface))
ctx = self._renderer.gc.ctx
if orientation == 'landscape':
ctx.rotate(np.pi / 2)
ctx.translate(0, -height_in_points)
# Perhaps add an '%%Orientation: Landscape' comment?
self.figure.draw(self._renderer)
ctx.show_page()
surface.finish()
if fmt == 'svgz':
fobj.close()
print_pdf = functools.partialmethod(_save, "pdf")
print_ps = functools.partialmethod(_save, "ps")
print_svg = functools.partialmethod(_save, "svg")
print_svgz = functools.partialmethod(_save, "svgz")
@_Backend.export
| FigureCanvasCairo |
python | ray-project__ray | python/ray/_private/test_utils.py | {
"start": 48059,
"end": 49339
} | class ____(ResourceKillerActor):
async def _find_resources_to_kill(self):
nodes_to_kill = []
while not nodes_to_kill and self.is_running:
worker_nodes = [
node
for node in ray.nodes()
if node["Alive"]
and (node["NodeID"] != self.head_node_id)
and (node["NodeID"] not in self.killed)
]
if self.kill_filter_fn:
candidates = list(filter(self.kill_filter_fn(), worker_nodes))
else:
candidates = worker_nodes
# Ensure at least one worker node remains alive.
if len(worker_nodes) < self.batch_size_to_kill + 1:
# Give the cluster some time to start.
await asyncio.sleep(1)
continue
# Collect nodes to kill, limited by batch size.
for candidate in candidates[: self.batch_size_to_kill]:
nodes_to_kill.append(
(
candidate["NodeID"],
candidate["NodeManagerAddress"],
candidate["NodeManagerPort"],
)
)
return nodes_to_kill
@ray.remote(num_cpus=0)
| NodeKillerBase |
python | OmkarPathak__pygorithm | tests/test_geometry.py | {
"start": 39892,
"end": 62026
} | class ____(unittest.TestCase):
def test_constructor_defaults(self):
_rect = rect2.Rect2(1, 1)
self.assertIsNotNone(_rect)
self.assertEqual(1, _rect.width)
self.assertEqual(1, _rect.height)
self.assertIsNotNone(_rect.mincorner)
self.assertEqual(0, _rect.mincorner.x)
self.assertEqual(0, _rect.mincorner.y)
def test_constructor_specified(self):
_rect = rect2.Rect2(1, 3, vector2.Vector2(-1, -1))
self.assertEqual(1, _rect.width)
self.assertEqual(3, _rect.height)
self.assertIsNotNone(_rect.mincorner)
self.assertEqual(-1, _rect.mincorner.x)
self.assertEqual(-1, _rect.mincorner.y)
def test_constructor_errors(self):
with self.assertRaises(ValueError):
_rect = rect2.Rect2(-1, 1)
with self.assertRaises(ValueError):
_rect = rect2.Rect2(1, -1)
with self.assertRaises(ValueError):
_rect = rect2.Rect2(0, 1)
with self.assertRaises(ValueError):
_rect = rect2.Rect2(5, 0)
with self.assertRaises(ValueError):
_rect = rect2.Rect2(0, 0)
with self.assertRaises(ValueError):
_rect = rect2.Rect2(-3, -3)
def test_width(self):
_rect = rect2.Rect2(1, 1)
self.assertEqual(1, _rect.width)
_rect.width = 3
self.assertEqual(3, _rect.width)
with self.assertRaises(ValueError):
_rect.width = 0
_rect = rect2.Rect2(1, 1)
with self.assertRaises(ValueError):
_rect.width = -3
def test_height(self):
_rect = rect2.Rect2(7, 11)
self.assertEqual(11, _rect.height)
_rect.height = 5
self.assertEqual(5, _rect.height)
with self.assertRaises(ValueError):
_rect.height = 0
_rect = rect2.Rect2(1, 1)
with self.assertRaises(ValueError):
_rect.height = -15
_rect = rect2.Rect2(1, 1)
with self.assertRaises(ValueError):
_rect.height = 1e-09
def test_polygon_unshifted(self):
_rect = rect2.Rect2(1, 1)
self.assertIsNotNone(_rect.polygon)
self.assertEqual(0, _rect.polygon.points[0].x)
self.assertEqual(0, _rect.polygon.points[0].y)
self.assertEqual(0, _rect.polygon.points[1].x)
self.assertEqual(1, _rect.polygon.points[1].y)
self.assertEqual(1, _rect.polygon.points[2].x)
self.assertEqual(1, _rect.polygon.points[2].y)
self.assertEqual(1, _rect.polygon.points[3].x)
self.assertEqual(0, _rect.polygon.points[3].y)
self.assertEqual(4, len(_rect.polygon.points))
def test_polygon_shifted(self):
_rect = rect2.Rect2(1, 1, vector2.Vector2(1, 1))
self.assertIsNotNone(_rect.polygon)
self.assertEqual(0, _rect.polygon.points[0].x)
self.assertEqual(0, _rect.polygon.points[0].y)
self.assertEqual(0, _rect.polygon.points[1].x)
self.assertEqual(1, _rect.polygon.points[1].y)
self.assertEqual(1, _rect.polygon.points[2].x)
self.assertEqual(1, _rect.polygon.points[2].y)
self.assertEqual(1, _rect.polygon.points[3].x)
self.assertEqual(0, _rect.polygon.points[3].y)
self.assertEqual(4, len(_rect.polygon.points))
def test_polygon_resized(self):
_rect = rect2.Rect2(1, 1)
self.assertIsNotNone(_rect.polygon)
self.assertEqual(0, _rect.polygon.points[0].x)
self.assertEqual(0, _rect.polygon.points[0].y)
self.assertEqual(0, _rect.polygon.points[1].x)
self.assertEqual(1, _rect.polygon.points[1].y)
self.assertEqual(1, _rect.polygon.points[2].x)
self.assertEqual(1, _rect.polygon.points[2].y)
self.assertEqual(1, _rect.polygon.points[3].x)
self.assertEqual(0, _rect.polygon.points[3].y)
self.assertEqual(4, len(_rect.polygon.points))
_rect.width = 3
self.assertIsNotNone(_rect.polygon)
self.assertEqual(0, _rect.polygon.points[0].x)
self.assertEqual(0, _rect.polygon.points[0].y)
self.assertEqual(0, _rect.polygon.points[1].x)
self.assertEqual(1, _rect.polygon.points[1].y)
self.assertEqual(3, _rect.polygon.points[2].x)
self.assertEqual(1, _rect.polygon.points[2].y)
self.assertEqual(3, _rect.polygon.points[3].x)
self.assertEqual(0, _rect.polygon.points[3].y)
self.assertEqual(4, len(_rect.polygon.points))
_rect.height = 0.5
self.assertIsNotNone(_rect.polygon)
self.assertEqual(0, _rect.polygon.points[0].x)
self.assertEqual(0, _rect.polygon.points[0].y)
self.assertEqual(0, _rect.polygon.points[1].x)
self.assertEqual(0.5, _rect.polygon.points[1].y)
self.assertEqual(3, _rect.polygon.points[2].x)
self.assertEqual(0.5, _rect.polygon.points[2].y)
self.assertEqual(3, _rect.polygon.points[3].x)
self.assertEqual(0, _rect.polygon.points[3].y)
self.assertEqual(4, len(_rect.polygon.points))
def test_area(self):
_rect = rect2.Rect2(1, 1)
self.assertEqual(1, _rect.area)
_rect.width = 3
self.assertEqual(3, _rect.area)
_rect.height = 7
self.assertEqual(21, _rect.area)
def test_project_onto_axis_horizontal_unshifted(self):
_rect = rect2.Rect2(3, 7)
proj = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(1, 0))
self.assertEqual(0, proj.min)
self.assertEqual(3, proj.max)
self.assertEqual(1, proj.axis.x)
self.assertEqual(0, proj.axis.y)
proj2 = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(-1, 0))
self.assertEqual(-3, proj2.min)
self.assertEqual(0, proj2.max)
self.assertEqual(-1, proj2.axis.x)
self.assertEqual(0, proj2.axis.y)
def test_project_onto_axis_vertical_unshifted(self):
_rect = rect2.Rect2(5, 11)
proj = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(0, 1))
self.assertEqual(0, proj.min)
self.assertEqual(11, proj.max)
self.assertEqual(0, proj.axis.x)
self.assertEqual(1, proj.axis.y)
proj2 = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(0, -1))
self.assertEqual(-11, proj2.min)
self.assertEqual(0, proj2.max)
self.assertEqual(0, proj2.axis.x)
self.assertEqual(-1, proj2.axis.y)
def test_project_onto_axis_diagonal_unshifted(self):
_rect = rect2.Rect2(1, 3)
_axis = vector2.Vector2(1, 1).normalize()
proj = rect2.Rect2.project_onto_axis(_rect, _axis)
self.assertAlmostEqual(0, proj.min)
self.assertAlmostEqual(2.82842712472, proj.max)
self.assertAlmostEqual(_axis.x, proj.axis.x)
self.assertAlmostEqual(_axis.y, proj.axis.y)
_axis2 = vector2.Vector2(-1, -1).normalize()
proj2 = rect2.Rect2.project_onto_axis(_rect, _axis2)
self.assertAlmostEqual(-2.82842712472, proj2.min)
self.assertAlmostEqual(0, proj2.max)
self.assertAlmostEqual(_axis2.x, proj2.axis.x)
self.assertAlmostEqual(_axis2.y, proj2.axis.y)
def test_project_onto_axis_horizontal_shifted(self):
_rect = rect2.Rect2(3, 2, vector2.Vector2(2, 2))
proj = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(1, 0))
self.assertEqual(2, proj.min)
self.assertEqual(5, proj.max)
self.assertEqual(1, proj.axis.x)
self.assertEqual(0, proj.axis.y)
proj2 = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(-1, 0))
self.assertEqual(-5, proj2.min)
self.assertEqual(-2, proj2.max)
self.assertEqual(-1, proj2.axis.x)
self.assertEqual(0, proj2.axis.y)
_rect2 = rect2.Rect2(3, 2, vector2.Vector2(-1, 2))
proj3 = rect2.Rect2.project_onto_axis(_rect2, vector2.Vector2(-1, 0))
self.assertEqual(-2, proj3.min)
self.assertEqual(1, proj3.max)
self.assertEqual(-1, proj3.axis.x)
self.assertEqual(0, proj3.axis.y)
def test_project_onto_axis_vertical_shifted(self):
_rect = rect2.Rect2(4, 7, vector2.Vector2(1, 3))
proj = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(0, 1))
self.assertEqual(3, proj.min)
self.assertEqual(10, proj.max)
self.assertEqual(0, proj.axis.x)
self.assertEqual(1, proj.axis.y)
proj2 = rect2.Rect2.project_onto_axis(_rect, vector2.Vector2(0, -1))
self.assertEqual(-10, proj2.min)
self.assertEqual(-3, proj2.max)
self.assertEqual(0, proj2.axis.x)
self.assertEqual(-1, proj2.axis.y)
_rect2 = rect2.Rect2(4, 7, vector2.Vector2(1, -2))
proj3 = rect2.Rect2.project_onto_axis(_rect2, vector2.Vector2(0, -1))
self.assertEqual(-5, proj3.min)
self.assertEqual(2, proj3.max)
self.assertEqual(0, proj3.axis.x)
self.assertEqual(-1, proj3.axis.y)
def test_project_onto_axis_diagonal_shifted(self):
_rect = rect2.Rect2(3, 5, vector2.Vector2(2, 2))
_axis = vector2.Vector2(1, 1).normalize()
proj = rect2.Rect2.project_onto_axis(_rect, _axis)
self.assertAlmostEqual(2.82842712, proj.min)
self.assertAlmostEqual(8.48528137, proj.max)
self.assertAlmostEqual(_axis.x, proj.axis.x)
self.assertAlmostEqual(_axis.y, proj.axis.y)
_axis2 = vector2.Vector2(-1, -1).normalize()
proj2 = rect2.Rect2.project_onto_axis(_rect, _axis2)
self.assertAlmostEqual(-8.48528137, proj2.min)
self.assertAlmostEqual(-2.82842712, proj2.max)
self.assertAlmostEqual(_axis2.x, proj2.axis.x)
self.assertAlmostEqual(_axis2.y, proj2.axis.y)
_rect2 = rect2.Rect2(3, 5, vector2.Vector2(-1, -2))
proj3 = rect2.Rect2.project_onto_axis(_rect2, _axis2)
self.assertAlmostEqual(-3.53553391, proj3.min)
self.assertAlmostEqual(2.12132034, proj3.max)
self.assertAlmostEqual(_axis2.x, proj3.axis.x)
self.assertAlmostEqual(_axis2.y, proj3.axis.y)
def test_contains_point_false(self):
_rect = rect2.Rect2(1, 2, vector2.Vector2(2, 2))
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(0, 0))
self.assertFalse(edge)
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(4, 2))
self.assertFalse(edge)
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(2, 5))
self.assertFalse(edge)
self.assertFalse(inner)
def test_contains_point_edge(self):
_rect = rect2.Rect2(3, 2, vector2.Vector2(-2, -2))
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(-2, -2))
self.assertTrue(edge, msg="mincorner")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(1, -2))
self.assertTrue(edge, msg="corner")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(1, 0))
self.assertTrue(edge, msg="maxcorner")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(-2, 0))
self.assertTrue(edge, msg="corner")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(-1, -2))
self.assertTrue(edge, msg="y-min side")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(0, 0))
self.assertTrue(edge, msg="y-max side")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(-2, -1))
self.assertTrue(edge, msg="x-min side")
self.assertFalse(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(1, -0.5))
self.assertTrue(edge, msg="x-max side, floating")
self.assertFalse(inner)
def test_contains_point_contained(self):
_rect = rect2.Rect2(4, 5, vector2.Vector2(3, 3))
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(5, 6))
self.assertFalse(edge)
self.assertTrue(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(5.5, 6.5))
self.assertFalse(edge)
self.assertTrue(inner)
edge, inner = rect2.Rect2.contains_point(_rect, vector2.Vector2(4.5, 7.5))
self.assertFalse(edge)
self.assertTrue(inner)
def _create_help_msg(*args):
# this function produced links for rects or polygons using _create_link
self = args[0]
allpts = []
result = ""
i = 1
while i < len(args):
a = args[i]
result += "\n\n"
is_rect = type(a) == rect2.Rect2
if is_rect:
result += "rect: {}\n".format(str(a))
pts = list(p + a.mincorner for p in a.polygon.points)
allpts += pts
result += polygon2.Polygon2._create_link(pts)
i += 1
else:
offset = args[i + 1]
result += "polygon: {} at {}\n".format(str(a), str(offset))
pts = list(p + offset for p in a.points)
allpts += pts
result += polygon2.Polygon2._create_link(pts)
i += 2
result += "\n\ntogether: {}".format(polygon2.Polygon2._create_link(allpts))
return result
def test_find_intersection_rect_poly_false(self):
_rect = rect2.Rect2(3, 2, vector2.Vector2(2, 1))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(0, 0.5)
visualize = self._create_help_msg(_rect, _poly, _offset)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect, _poly, _offset)
self.assertFalse(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_rect_poly_edge(self):
_rect = rect2.Rect2(2, 1, vector2.Vector2(0, 2.118033988749895))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(0, 0.5)
visualize = self._create_help_msg(_rect, _poly, _offset)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect, _poly, _offset)
self.assertTrue(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_rect_poly_mtv(self):
_rect = rect2.Rect2(1, 3, vector2.Vector2(0.5, -0.5))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(1, 0)
visualize = self._create_help_msg(_rect, _poly, _offset)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect, _poly, _offset)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNotNone(mtv, msg=visualize)
self.assertAlmostEqual(-0.5, mtv[0] * mtv[1].x)
self.assertAlmostEqual(0, mtv[0] * mtv[1].y)
def test_find_intersection_rect_poly_coll_findmtv_false(self):
_rect = rect2.Rect2(1, 3, vector2.Vector2(0.5, -0.5))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(1, 0)
visualize = self._create_help_msg(_rect, _poly, _offset)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect, _poly, _offset, find_mtv=False)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_poly_rect_false(self):
_rect = rect2.Rect2(3, 2, vector2.Vector2(2, 1))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(0, 0.5)
visualize = self._create_help_msg(_poly, _offset, _rect)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_poly, _offset, _rect)
self.assertFalse(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_poly_rect_edge(self):
_rect = rect2.Rect2(2, 1, vector2.Vector2(0, 2.118033988749895))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(0, 0.5)
visualize = self._create_help_msg(_poly, _offset, _rect)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_poly, _offset, _rect)
self.assertTrue(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_poly_rect_mtv(self):
_rect = rect2.Rect2(1, 3, vector2.Vector2(0.5, -0.5))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(1, 0)
visualize = self._create_help_msg(_poly, _offset, _rect)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_poly, _offset, _rect)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNotNone(mtv, msg=visualize)
self.assertAlmostEqual(0.5, mtv[0] * mtv[1].x)
self.assertAlmostEqual(0, mtv[0] * mtv[1].y)
def test_find_intersection_poly_rect_coll_findmtv_false(self):
_rect = rect2.Rect2(1, 3, vector2.Vector2(0.5, -0.5))
_poly = polygon2.Polygon2.from_regular(5, 1)
_offset = vector2.Vector2(1, 0)
visualize = self._create_help_msg(_poly, _offset, _rect)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_poly, _offset, _rect, find_mtv=False)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_rect_rect_false(self):
_rect1 = rect2.Rect2(2, 3, vector2.Vector2(0.5, 0.5))
_rect2 = rect2.Rect2(1, 1, vector2.Vector2(-1, 0))
visualize = self._create_help_msg(_rect1, _rect2)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect1, _rect2)
self.assertFalse(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_rect_rect_edge(self):
_rect1 = rect2.Rect2(3, 4, vector2.Vector2(1, 0.70723))
_rect2 = rect2.Rect2(1, 1, vector2.Vector2(2, 4.70723))
visualize = self._create_help_msg(_rect1, _rect2)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect1, _rect2)
self.assertTrue(touching, msg=visualize)
self.assertFalse(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_find_intersection_rect_rect_mtv(self):
_rect1 = rect2.Rect2(3, 5, vector2.Vector2(-2, -6))
_rect2 = rect2.Rect2(2, 1, vector2.Vector2(0, -3))
visualize = self._create_help_msg(_rect1, _rect2)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect1, _rect2)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNotNone(mtv, msg=visualize)
self.assertEqual(-1, mtv[0] * mtv[1].x, msg="touching={}, overlapping={}, mtv={}\n\n{}".format(touching, overlapping, mtv, visualize))
self.assertEqual(0, mtv[0] * mtv[1].y, msg=visualize)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect2, _rect1)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNotNone(mtv, msg=visualize)
self.assertEqual(1, mtv[0] * mtv[1].x)
self.assertEqual(0, mtv[0] * mtv[1].y)
def test_find_intersection_rect_rect_coll_findmtv_false(self):
_rect1 = rect2.Rect2(3, 5, vector2.Vector2(-2, -6))
_rect2 = rect2.Rect2(2, 1, vector2.Vector2(0, -3))
visualize = self._create_help_msg(_rect1, _rect2)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect1, _rect2, find_mtv=False)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
touching, overlapping, mtv = rect2.Rect2.find_intersection(_rect2, _rect1, find_mtv=False)
self.assertFalse(touching, msg=visualize)
self.assertTrue(overlapping, msg=visualize)
self.assertIsNone(mtv, msg=visualize)
def test_repr(self):
unit_square = rect2.Rect2(1, 1, vector2.Vector2(3, 4))
self.assertEqual("rect2(width=1, height=1, mincorner=vector2(x=3, y=4))", repr(unit_square))
def test_str(self):
unit_square = rect2.Rect2(1, 1, vector2.Vector2(3, 4))
ugly_rect = rect2.Rect2(0.7071234, 0.7079876, vector2.Vector2(0.56789123, 0.876543))
self.assertEqual("rect(1x1 at <3, 4>)", str(unit_square))
self.assertEqual("rect(0.707x0.708 at <0.568, 0.877>)", str(ugly_rect))
| TestRect2 |
python | mlflow__mlflow | mlflow/telemetry/client.py | {
"start": 778,
"end": 13315
} | class ____:
def __init__(self):
self.info = asdict(
TelemetryInfo(
session_id=_MLFLOW_TELEMETRY_SESSION_ID.get() or uuid.uuid4().hex,
installation_id=get_or_create_installation_id(),
)
)
self._queue: Queue[list[Record]] = Queue(maxsize=MAX_QUEUE_SIZE)
self._lock = threading.RLock()
self._max_workers = MAX_WORKERS
self._is_stopped = False
self._is_active = False
self._atexit_callback_registered = False
self._batch_size = BATCH_SIZE
self._batch_time_interval = BATCH_TIME_INTERVAL_SECONDS
self._pending_records: list[Record] = []
self._last_batch_time = time.time()
self._batch_lock = threading.Lock()
# consumer threads for sending records
self._consumer_threads = []
self._is_config_fetched = False
self.config = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._clean_up()
def _fetch_config(self):
def _fetch():
try:
self._get_config()
if self.config is None:
self._is_stopped = True
_set_telemetry_client(None)
self._is_config_fetched = True
except Exception:
self._is_stopped = True
self._is_config_fetched = True
_set_telemetry_client(None)
self._config_thread = threading.Thread(
target=_fetch,
name="GetTelemetryConfig",
daemon=True,
)
self._config_thread.start()
def _get_config(self):
"""
Get the config for the given MLflow version.
"""
mlflow_version = self.info["mlflow_version"]
if config_url := _get_config_url(mlflow_version):
try:
response = requests.get(config_url, timeout=1)
if response.status_code != 200:
return
config = response.json()
if (
config.get("mlflow_version") != mlflow_version
or config.get("disable_telemetry") is True
or config.get("ingestion_url") is None
):
return
if get_source_sdk().value in config.get("disable_sdks", []):
return
if sys.platform in config.get("disable_os", []):
return
rollout_percentage = config.get("rollout_percentage", 100)
if random.randint(0, 100) > rollout_percentage:
return
self.config = TelemetryConfig(
ingestion_url=config["ingestion_url"],
disable_events=set(config.get("disable_events", [])),
)
except Exception as e:
_log_error(f"Failed to get telemetry config: {e}")
return
def add_record(self, record: Record):
"""
Add a record to be batched and sent to the telemetry server.
"""
if not self.is_active:
self.activate()
if self._is_stopped:
return
with self._batch_lock:
self._pending_records.append(record)
# Only send if we've reached the batch size;
# time-based sending is handled by the consumer thread.
if len(self._pending_records) >= self._batch_size:
self._send_batch()
def _send_batch(self):
"""Send the current batch of records."""
if not self._pending_records:
return
self._last_batch_time = time.time()
try:
self._queue.put(self._pending_records, block=False)
self._pending_records = []
except Full:
_log_error("Failed to add record to the queue, queue is full")
def _process_records(self, records: list[Record], request_timeout: float = 1):
"""Process a batch of telemetry records."""
try:
self._update_backend_store()
if self.info["tracking_uri_scheme"] in ["databricks", "databricks-uc", "uc"]:
self._is_stopped = True
# set config to None to allow consumer thread drop records in the queue
self.config = None
self.is_active = False
_set_telemetry_client(None)
return
records = [
{
"data": self.info | record.to_dict(),
# use random uuid as partition key to make sure records are
# distributed evenly across shards
"partition-key": uuid.uuid4().hex,
}
for record in records
]
# changing this value can affect total time for processing records
# the total time = request_timeout * max_attempts + sleep_time * (max_attempts - 1)
max_attempts = 3
sleep_time = 1
for i in range(max_attempts):
should_retry = False
response = None
try:
response = requests.post(
self.config.ingestion_url,
json={"records": records},
headers={"Content-Type": "application/json"},
timeout=request_timeout,
)
should_retry = response.status_code in RETRYABLE_ERRORS
except (ConnectionError, TimeoutError):
should_retry = True
# NB: DO NOT retry when terminating
# otherwise this increases shutdown overhead significantly
if self._is_stopped:
return
if i < max_attempts - 1 and should_retry:
# we do not use exponential backoff to avoid increasing
# the processing time significantly
time.sleep(sleep_time)
elif response and response.status_code in UNRECOVERABLE_ERRORS:
self._is_stopped = True
self.is_active = False
# this is executed in the consumer thread, so
# we cannot join the thread here, but this should
# be enough to stop the telemetry collection
return
else:
return
except Exception as e:
_log_error(f"Failed to send telemetry records: {e}")
def _consumer(self) -> None:
"""Individual consumer that processes records from the queue."""
# suppress logs in the consumer thread to avoid emitting any irrelevant
# logs during telemetry collection.
should_suppress_logs_in_thread.set(True)
while not self._is_config_fetched:
time.sleep(0.1)
while self.config and not self._is_stopped:
try:
records = self._queue.get(timeout=1)
except Empty:
# check if batch time interval has passed and send data if needed
if time.time() - self._last_batch_time >= self._batch_time_interval:
self._last_batch_time = time.time()
with self._batch_lock:
if self._pending_records:
self._send_batch()
continue
self._process_records(records)
self._queue.task_done()
# clear the queue if config is None
while self.config is None and not self._queue.empty():
try:
self._queue.get_nowait()
self._queue.task_done()
except Empty:
break
# drop remaining records when terminating to avoid
# causing any overhead
def activate(self) -> None:
"""Activate the async queue to accept and handle incoming tasks."""
with self._lock:
if self.is_active:
return
self._set_up_threads()
# only fetch config when activating to avoid fetching when
# no records are added
self._fetch_config()
# Callback to ensure remaining tasks are processed before program exit
if not self._atexit_callback_registered:
# This works in jupyter notebook
atexit.register(self._at_exit_callback)
self._atexit_callback_registered = True
self.is_active = True
@property
def is_active(self) -> bool:
return self._is_active
@is_active.setter
def is_active(self, value: bool) -> None:
self._is_active = value
def _set_up_threads(self) -> None:
"""Set up multiple consumer threads."""
with self._lock:
# Start multiple consumer threads
for i in range(self._max_workers):
consumer_thread = threading.Thread(
target=self._consumer,
name=f"MLflowTelemetryConsumer-{i}",
daemon=True,
)
consumer_thread.start()
self._consumer_threads.append(consumer_thread)
def _at_exit_callback(self) -> None:
"""Callback function executed when the program is exiting."""
try:
# Suppress logs/warnings during shutdown
# NB: this doesn't suppress log not emitted by mlflow
with suppress_logs_in_thread(), warnings.catch_warnings():
warnings.simplefilter("ignore")
self.flush(terminate=True)
except Exception as e:
_log_error(f"Failed to flush telemetry during termination: {e}")
def flush(self, terminate=False) -> None:
"""
Flush the async telemetry queue.
Args:
terminate: If True, shut down the telemetry threads after flushing.
"""
if not self.is_active:
return
if terminate:
# Full shutdown for termination - signal stop and exit immediately
self._is_stopped = True
self.is_active = False
# non-terminating flush is only used in tests
else:
self._config_thread.join(timeout=1)
# Send any pending records before flushing
with self._batch_lock:
if self._pending_records and self.config and not self._is_stopped:
self._send_batch()
# For non-terminating flush, just wait for queue to empty
try:
self._queue.join()
except Exception as e:
_log_error(f"Failed to flush telemetry: {e}")
def _update_backend_store(self):
"""
Backend store might be changed after mlflow is imported, we should use this
method to update the backend store info at sending telemetry step.
"""
try:
# import here to avoid circular import
from mlflow.tracking._tracking_service.utils import _get_tracking_scheme
self.info["tracking_uri_scheme"] = _get_tracking_scheme()
except Exception as e:
_log_error(f"Failed to update backend store: {e}")
def _clean_up(self):
"""Join all threads"""
self.flush(terminate=True)
for thread in self._consumer_threads:
if thread.is_alive():
thread.join(timeout=1)
_MLFLOW_TELEMETRY_CLIENT = None
_client_lock = threading.Lock()
def set_telemetry_client():
if is_telemetry_disabled():
# set to None again so this function can be used to
# re-initialize the telemetry client
_set_telemetry_client(None)
else:
try:
_set_telemetry_client(TelemetryClient())
except Exception as e:
_log_error(f"Failed to set telemetry client: {e}")
_set_telemetry_client(None)
def _set_telemetry_client(value: TelemetryClient | None):
global _MLFLOW_TELEMETRY_CLIENT
with _client_lock:
_MLFLOW_TELEMETRY_CLIENT = value
if value:
_MLFLOW_TELEMETRY_SESSION_ID.set(value.info["session_id"])
else:
_MLFLOW_TELEMETRY_SESSION_ID.unset()
def get_telemetry_client() -> TelemetryClient | None:
with _client_lock:
return _MLFLOW_TELEMETRY_CLIENT
| TelemetryClient |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/bigtable.py | {
"start": 1251,
"end": 1458
} | class ____(BaseGoogleLink):
"""Helper class for constructing Bigtable Instance link."""
name = "Bigtable Instance"
key = "instance_key"
format_str = BIGTABLE_INSTANCE_LINK
| BigtableInstanceLink |
python | pytest-dev__pytest | testing/test_assertion.py | {
"start": 2354,
"end": 13137
} | class ____:
@pytest.mark.parametrize("initial_conftest", [True, False])
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
def test_conftest_assertion_rewrite(
self, pytester: Pytester, initial_conftest, mode
) -> None:
"""Test that conftest files are using assertion rewrite on import (#1619)."""
pytester.mkdir("foo")
pytester.mkdir("foo/tests")
conftest_path = "conftest.py" if initial_conftest else "foo/conftest.py"
contents = {
conftest_path: """
import pytest
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
"foo/tests/test_foo.py": """
def test(check_first):
check_first([10, 30], 30)
""",
}
pytester.makepyfile(**contents)
result = pytester.runpytest_subprocess(f"--assert={mode}")
if mode == "plain":
expected = "E AssertionError"
elif mode == "rewrite":
expected = "*assert 10 == 30*"
else:
assert 0
result.stdout.fnmatch_lines([expected])
def test_rewrite_assertions_pytester_plugin(self, pytester: Pytester) -> None:
"""
Assertions in the pytester plugin must also benefit from assertion
rewriting (#1920).
"""
pytester.makepyfile(
"""
pytest_plugins = ['pytester']
def test_dummy_failure(pytester): # how meta!
pytester.makepyfile('def test(): assert 0')
r = pytester.inline_run()
r.assertoutcome(passed=1)
"""
)
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"> r.assertoutcome(passed=1)",
"E AssertionError: ([[][]], [[][]], [[]<TestReport *>[]])*",
"E assert {'failed': 1,... 'skipped': 0} == {'failed': 0,... 'skipped': 0}",
"E Omitting 1 identical items, use -vv to show",
"E Differing items:",
"E Use -v to get more diff",
]
)
# XXX: unstable output.
result.stdout.fnmatch_lines_random(
[
"E {'failed': 1} != {'failed': 0}",
"E {'passed': 0} != {'passed': 1}",
]
)
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
def test_pytest_plugins_rewrite(self, pytester: Pytester, mode) -> None:
contents = {
"conftest.py": """
pytest_plugins = ['ham']
""",
"ham.py": """
import pytest
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
"test_foo.py": """
def test_foo(check_first):
check_first([10, 30], 30)
""",
}
pytester.makepyfile(**contents)
result = pytester.runpytest_subprocess(f"--assert={mode}")
if mode == "plain":
expected = "E AssertionError"
elif mode == "rewrite":
expected = "*assert 10 == 30*"
else:
assert 0
result.stdout.fnmatch_lines([expected])
@pytest.mark.parametrize("mode", ["str", "list"])
def test_pytest_plugins_rewrite_module_names(
self, pytester: Pytester, mode
) -> None:
"""Test that pluginmanager correct marks pytest_plugins variables
for assertion rewriting if they are defined as plain strings or
list of strings (#1888).
"""
plugins = '"ham"' if mode == "str" else '["ham"]'
contents = {
"conftest.py": f"""
pytest_plugins = {plugins}
""",
"ham.py": """
import pytest
""",
"test_foo.py": """
def test_foo(pytestconfig):
assert 'ham' in pytestconfig.pluginmanager.rewrite_hook._must_rewrite
""",
}
pytester.makepyfile(**contents)
result = pytester.runpytest_subprocess("--assert=rewrite")
assert result.ret == 0
def test_pytest_plugins_rewrite_module_names_correctly(
self, pytester: Pytester
) -> None:
"""Test that we match files correctly when they are marked for rewriting (#2939)."""
contents = {
"conftest.py": """\
pytest_plugins = "ham"
""",
"ham.py": "",
"hamster.py": "",
"test_foo.py": """\
def test_foo(pytestconfig):
assert pytestconfig.pluginmanager.rewrite_hook.find_spec('ham') is not None
assert pytestconfig.pluginmanager.rewrite_hook.find_spec('hamster') is None
""",
}
pytester.makepyfile(**contents)
result = pytester.runpytest_subprocess("--assert=rewrite")
assert result.ret == 0
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
@pytest.mark.parametrize("disable_plugin_autoload", ["env_var", "cli", ""])
@pytest.mark.parametrize("explicit_specify", ["env_var", "cli", ""])
def test_installed_plugin_rewrite(
self,
pytester: Pytester,
mode: str,
monkeypatch: pytest.MonkeyPatch,
disable_plugin_autoload: str,
explicit_specify: str,
) -> None:
args = ["mainwrapper.py", "-s", f"--assert={mode}"]
if disable_plugin_autoload == "env_var":
monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
elif disable_plugin_autoload == "cli":
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
args.append("--disable-plugin-autoload")
else:
assert disable_plugin_autoload == ""
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
name = "spamplugin"
if explicit_specify == "env_var":
monkeypatch.setenv("PYTEST_PLUGINS", name)
elif explicit_specify == "cli":
args.append("-p")
args.append(name)
else:
assert explicit_specify == ""
# Make sure the hook is installed early enough so that plugins
# installed via distribution package are rewritten.
pytester.mkdir("hampkg")
contents = {
"hampkg/__init__.py": """\
import pytest
@pytest.fixture
def check_first2():
def check(values, value):
assert values.pop(0) == value
return check
""",
"spamplugin.py": """\
import pytest
from hampkg import check_first2
@pytest.fixture
def check_first():
def check(values, value):
assert values.pop(0) == value
return check
""",
"mainwrapper.py": """\
import importlib.metadata
import pytest
class DummyEntryPoint(object):
name = 'spamplugin'
module_name = 'spam.py'
group = 'pytest11'
def load(self):
import spamplugin
return spamplugin
class DummyDistInfo(object):
version = '1.0'
files = ('spamplugin.py', 'hampkg/__init__.py')
entry_points = (DummyEntryPoint(),)
metadata = {'name': 'foo'}
def distributions():
return (DummyDistInfo(),)
importlib.metadata.distributions = distributions
pytest.main()
""",
"test_foo.py": """\
def test(check_first):
check_first([10, 30], 30)
def test2(check_first2):
check_first2([10, 30], 30)
""",
}
pytester.makepyfile(**contents)
result = pytester.run(sys.executable, *args)
if mode == "plain":
expected = "E AssertionError"
elif mode == "rewrite":
expected = "*assert 10 == 30*"
else:
assert 0
if not disable_plugin_autoload or explicit_specify:
result.assert_outcomes(failed=2)
result.stdout.fnmatch_lines([expected, expected])
else:
result.assert_outcomes(errors=2)
result.stdout.fnmatch_lines(
[
"E fixture 'check_first' not found",
"E fixture 'check_first2' not found",
]
)
def test_rewrite_ast(self, pytester: Pytester) -> None:
pytester.mkdir("pkg")
contents = {
"pkg/__init__.py": """
import pytest
pytest.register_assert_rewrite('pkg.helper')
""",
"pkg/helper.py": """
def tool():
a, b = 2, 3
assert a == b
""",
"pkg/plugin.py": """
import pytest, pkg.helper
@pytest.fixture
def tool():
return pkg.helper.tool
""",
"pkg/other.py": """
values = [3, 2]
def tool():
assert values.pop() == 3
""",
"conftest.py": """
pytest_plugins = ['pkg.plugin']
""",
"test_pkg.py": """
import pkg.other
def test_tool(tool):
tool()
def test_other():
pkg.other.tool()
""",
}
pytester.makepyfile(**contents)
result = pytester.runpytest_subprocess("--assert=rewrite")
result.stdout.fnmatch_lines(
[
">*assert a == b*",
"E*assert 2 == 3*",
">*assert values.pop() == 3*",
"E*AssertionError",
]
)
def test_register_assert_rewrite_checks_types(self) -> None:
with pytest.raises(TypeError):
pytest.register_assert_rewrite(["pytest_tests_internal_non_existing"]) # type: ignore
pytest.register_assert_rewrite(
"pytest_tests_internal_non_existing", "pytest_tests_internal_non_existing2"
)
| TestImportHookInstallation |
python | numpy__numpy | numpy/f2py/tests/test_crackfortran.py | {
"start": 12802,
"end": 13050
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")]
@pytest.mark.slow
def test_function_rettype(self):
# gh-23598
assert self.module.intproduct(3, 4) == 12
| TestFunctionReturn |
python | python-pillow__Pillow | src/PIL/BlpImagePlugin.py | {
"start": 7807,
"end": 7948
} | class ____(NotImplementedError):
pass
def _accept(prefix: bytes) -> bool:
return prefix.startswith((b"BLP1", b"BLP2"))
| BLPFormatError |
python | sanic-org__sanic | sanic/logging/formatter.py | {
"start": 8728,
"end": 9141
} | class ____(AutoAccessFormatter):
IDENT_LIMIT = 5
MESSAGE_START = 42
PREFIX_FORMAT = (
f"{c.GREY}%(ident)s{{limit}}|%(asctime)s{c.END} "
f"%(levelname)s: {{start}}"
)
MESSAGE_FORMAT = (
f"{c.PURPLE}%(host)s {c.BLUE + c.BOLD}"
f"%(request)s{c.END} "
f"%(right)s%(status)s %(byte)s {c.GREY}%(duration)s{c.END}"
)
LOG_EXTRA = False
| ProdAccessFormatter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/types.py | {
"start": 2507,
"end": 2616
} | class ____(FromDictMixin):
display_value: str
actual_value: str
@dataclasses.dataclass
| ZohoPickListItem |
python | qdrant__qdrant-client | qdrant_client/local/distances.py | {
"start": 1723,
"end": 1994
} | class ____:
def __init__(self, target: list[float], context: list[ContextPair]):
self.target: types.NumpyArray = np.array(target)
self.context = context
assert not np.isnan(self.target).any(), "Target vector must not contain NaN"
| DiscoveryQuery |
python | optuna__optuna | optuna/samplers/nsgaii/_crossovers/_undx.py | {
"start": 295,
"end": 4138
} | class ____(BaseCrossover):
"""Unimodal Normal Distribution Crossover used by :class:`~optuna.samplers.NSGAIISampler`.
Generates child individuals from the three parents
using a multivariate normal distribution.
- `H. Kita, I. Ono and S. Kobayashi,
Multi-parental extension of the unimodal normal distribution crossover
for real-coded genetic algorithms,
Proceedings of the 1999 Congress on Evolutionary Computation-CEC99
(Cat. No. 99TH8406), 1999, pp. 1581-1588 Vol. 2
<https://doi.org/10.1109/CEC.1999.782672>`__
Args:
sigma_xi:
Parametrizes normal distribution from which ``xi`` is drawn.
sigma_eta:
Parametrizes normal distribution from which ``etas`` are drawn.
If not specified, defaults to ``0.35 / sqrt(len(search_space))``.
"""
n_parents = 3
def __init__(self, sigma_xi: float = 0.5, sigma_eta: float | None = None) -> None:
self._sigma_xi = sigma_xi
self._sigma_eta = sigma_eta
def _distance_from_x_to_psl(self, parents_params: np.ndarray) -> np.floating:
# The line connecting x1 to x2 is called psl (primary search line).
# Compute the 2-norm of the vector orthogonal to psl from x3.
e_12 = UNDXCrossover._normalized_x1_to_x2(
parents_params
) # Normalized vector from x1 to x2.
v_13 = parents_params[2] - parents_params[0] # Vector from x1 to x3.
v_12_3 = v_13 - np.dot(v_13, e_12) * e_12 # Vector orthogonal to v_12 through x3.
m_12_3 = np.linalg.norm(v_12_3, ord=2) # 2-norm of v_12_3.
return m_12_3
def _orthonormal_basis_vector_to_psl(self, parents_params: np.ndarray, n: int) -> np.ndarray:
# Compute orthogonal basis vectors for the subspace orthogonal to psl.
e_12 = UNDXCrossover._normalized_x1_to_x2(
parents_params
) # Normalized vector from x1 to x2.
basis_matrix = np.identity(n)
if np.count_nonzero(e_12) != 0:
basis_matrix[0] = e_12
basis_matrix_t = basis_matrix.T
Q, _ = np.linalg.qr(basis_matrix_t)
return Q.T[1:]
def crossover(
self,
parents_params: np.ndarray,
rng: np.random.RandomState,
study: Study,
search_space_bounds: np.ndarray,
) -> np.ndarray:
# https://doi.org/10.1109/CEC.1999.782672
# Section 2 Unimodal Normal Distribution Crossover
n = len(search_space_bounds)
xp = (parents_params[0] + parents_params[1]) / 2 # Section 2 (2).
d = parents_params[0] - parents_params[1] # Section 2 (3).
if self._sigma_eta is None:
sigma_eta = 0.35 / np.sqrt(n)
else:
sigma_eta = self._sigma_eta
etas = rng.normal(0, sigma_eta**2, size=n)
xi = rng.normal(0, self._sigma_xi**2)
es = self._orthonormal_basis_vector_to_psl(
parents_params, n
) # Orthonormal basis vectors of the subspace orthogonal to the psl.
one = xp # Section 2 (5).
two = xi * d # Section 2 (5).
if n > 1: # When n=1, there is no subsearch component.
three = np.zeros(n) # Section 2 (5).
D = self._distance_from_x_to_psl(parents_params) # Section 2 (4).
for i in range(n - 1):
three += etas[i] * es[i]
three *= D
child_params = one + two + three
else:
child_params = one + two
return child_params
@staticmethod
def _normalized_x1_to_x2(parents_params: np.ndarray) -> np.ndarray:
# Compute the normalized vector from x1 to x2.
v_12 = parents_params[1] - parents_params[0]
m_12 = np.linalg.norm(v_12, ord=2)
e_12 = v_12 / np.clip(m_12, 1e-10, None)
return e_12
| UNDXCrossover |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 58651,
"end": 59109
} | class ____(
legend_justification_right,
legend_justification_left,
legend_justification_top,
legend_justification_bottom,
legend_justification_inside,
):
"""
Justification of any legend
Parameters
----------
theme_element : Literal["left", "right", "center", "top", "bottom"] | \
float | tuple[float, float]
How to justify the entire group with 1 or more guides.
"""
| legend_justification |
python | dask__distributed | distributed/actor.py | {
"start": 8734,
"end": 9512
} | class ____(BaseActorFuture[_T]):
def __init__(self, io_loop: IOLoop):
self._io_loop = io_loop
self._event = LateLoopEvent()
self._out: _Error | _OK[_T] | None = None
def __await__(self) -> Generator[object, None, _T]:
return self._result().__await__()
def done(self) -> bool:
return self._event.is_set()
async def _result(self) -> _T:
await self._event.wait()
out = self._out
assert out is not None
return out.unwrap()
def _set_result(self, out: _Error | _OK[_T]) -> None:
self._out = out
self._event.set()
def result(self, timeout: str | timedelta | float | None = None) -> _T:
return sync(self._io_loop, self._result, callback_timeout=timeout)
| ActorFuture |
python | apache__airflow | providers/atlassian/jira/tests/unit/atlassian/jira/hooks/test_jira.py | {
"start": 1456,
"end": 2780
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, monkeypatch):
self.conn_id = "jira_default"
self.host = "https://localhost/jira/"
self.port = 443
self.login = "user"
self.password = "password"
self.proxies = None
monkeypatch.setenv(
f"AIRFLOW_CONN_{self.conn_id}".upper(),
connection_as_json(
Connection(
conn_id="jira_default",
conn_type="jira",
host="https://localhost/jira/",
port=443,
login="user",
password="password",
extra='{"verify": false, "project": "AIRFLOW"}',
)
),
)
def test_jira_client_connection(self, mocked_jira_client):
jira_hook = JiraHook(proxies=self.proxies)
mocked_jira_client.assert_called_once_with(
url=self.host,
username=self.login,
password=self.password,
verify_ssl=False,
proxies=self.proxies,
api_version="2",
api_root="rest/api",
)
assert isinstance(jira_hook.client, mock.Mock)
assert jira_hook.client.name == mocked_jira_client.return_value.name
| TestJiraHook |
python | mlflow__mlflow | mlflow/store/analytics/trace_correlation.py | {
"start": 804,
"end": 7127
} | class ____:
"""
Result of NPMI calculation containing both unsmoothed and smoothed values.
Attributes:
npmi: Unsmoothed NPMI value with explicit -1.0 rule for zero joint count.
Returns NaN when undefined (e.g., when filter1_count=0 or filter2_count=0).
npmi_smoothed: NPMI calculated with Jeffreys prior smoothing (alpha=0.5).
More robust for small sample sizes and confidence interval estimation.
"""
npmi: float
npmi_smoothed: float | None
def calculate_npmi_from_counts(
joint_count: int,
filter1_count: int,
filter2_count: int,
total_count: int,
) -> NPMIResult:
"""
Calculate both unsmoothed and smoothed NPMI from count data.
Implements the recommended policy for NPMI calculation:
- Returns NaN (undefined) when either filter has zero support (n1=0 or n2=0)
- Returns -1.0 for unsmoothed when filters never co-occur despite both having support
- Calculates smoothed version using Jeffreys prior for robustness
NPMI measures the association between two events, normalized to [-1, 1]:
- -1: Perfect negative correlation (events never co-occur)
- 0: Independence (events occur independently)
- 1: Perfect positive correlation (events always co-occur)
- NaN: Undefined (when one or both events have zero count)
Args:
joint_count: Number of times both events occur together
filter1_count: Number of times event 1 occurs
filter2_count: Number of times event 2 occurs
total_count: Total number of observations
Returns:
NPMIResult containing both unsmoothed and smoothed NPMI values.
Examples:
>>> result = calculate_npmi_from_counts(10, 20, 15, 100)
>>> result.npmi # Unsmoothed value
>>> result.npmi_smoothed # Smoothed value
"""
# No population
if total_count <= 0:
return NPMIResult(npmi=float("nan"), npmi_smoothed=float("nan"))
# Return NaN if either filter has zero support
if filter1_count == 0 or filter2_count == 0:
return NPMIResult(npmi=float("nan"), npmi_smoothed=float("nan"))
n11 = joint_count # Both occur
n10 = filter1_count - joint_count # Only filter1
n01 = filter2_count - joint_count # Only filter2
n00 = total_count - filter1_count - filter2_count + joint_count # Neither
if min(n11, n10, n01, n00) < 0:
# Inconsistent counts, return undefined
return NPMIResult(npmi=float("nan"), npmi_smoothed=float("nan"))
# Calculate unsmoothed NPMI with explicit -1.0 rule
if joint_count == 0 and filter1_count > 0 and filter2_count > 0:
npmi_unsmoothed = -1.0
else:
npmi_unsmoothed = _calculate_npmi_core(n11, n10, n01, n00, smoothing=0)
# Calculate smoothed NPMI for robustness
npmi_smoothed = _calculate_npmi_core(n11, n10, n01, n00, smoothing=JEFFREYS_PRIOR)
return NPMIResult(npmi=npmi_unsmoothed, npmi_smoothed=npmi_smoothed)
def _calculate_npmi_core(
n11: float,
n10: float,
n01: float,
n00: float,
smoothing: float = 0,
) -> float:
"""
Core NPMI calculation with optional smoothing.
Internal function that performs the actual NPMI calculation
on a 2x2 contingency table with optional additive smoothing.
Args:
n11: Count of both events occurring
n10: Count of only event 1 occurring
n01: Count of only event 2 occurring
n00: Count of neither event occurring
smoothing: Additive smoothing parameter (0 for no smoothing)
Returns:
NPMI value in [-1, 1], or NaN if undefined.
"""
n11_s = n11 + smoothing
n10_s = n10 + smoothing
n01_s = n01 + smoothing
n00_s = n00 + smoothing
N = n11_s + n10_s + n01_s + n00_s
n1 = n11_s + n10_s # Total event 1 count
n2 = n11_s + n01_s # Total event 2 count
# NB: When marginals are zero (degenerate cases where no events occur), we return NaN
# rather than forcing a sentinel value like -1. This is mathematically correct since
# PMI is undefined when P(x)=0 or P(y)=0 (division by zero). NaN properly represents
# this undefined state and can be handled by our RPC layer, providing a more accurate
# signal than an arbitrary sentinel value.
if n1 <= 0 or n2 <= 0 or n11_s <= 0:
return float("nan")
# Handle perfect co-occurrence - check pre-smoothing values
# With smoothing, n11_s == N is never true since smoothing adds mass to other cells
if n10 == 0 and n01 == 0 and n00 == 0:
# Perfect co-occurrence: both events always occur together
return 1.0
# Calculate PMI using log-space arithmetic for numerical stability
# PMI = log(P(x,y) / (P(x) * P(y))) = log(n11*N / (n1*n2))
log_n11 = math.log(n11_s)
log_N = math.log(N)
log_n1 = math.log(n1)
log_n2 = math.log(n2)
pmi = (log_n11 + log_N) - (log_n1 + log_n2)
# Normalize by -log(P(x,y)) to get NPMI
denominator = -(log_n11 - log_N) # -log(n11/N)
npmi = pmi / denominator
# Clamp to [-1, 1] to handle floating point errors
return max(-1.0, min(1.0, npmi))
def calculate_smoothed_npmi(
joint_count: int,
filter1_count: int,
filter2_count: int,
total_count: int,
smoothing: float = JEFFREYS_PRIOR,
) -> float:
"""
Calculate smoothed NPMI for confidence interval estimation.
This function applies additive smoothing (Jeffreys prior by default) to all cells
of the contingency table. Used for uncertainty quantification via Dirichlet sampling.
Args:
joint_count: Number of times both events occur together
filter1_count: Number of times event 1 occurs
filter2_count: Number of times event 2 occurs
total_count: Total number of observations
smoothing: Additive smoothing parameter (default: JEFFREYS_PRIOR = 0.5)
Returns:
Smoothed NPMI value in [-1, 1], or NaN if undefined.
"""
if total_count <= 0:
return float("nan")
n11 = joint_count
n10 = filter1_count - joint_count
n01 = filter2_count - joint_count
n00 = total_count - filter1_count - filter2_count + joint_count
if min(n11, n10, n01, n00) < 0:
return float("nan")
return _calculate_npmi_core(n11, n10, n01, n00, smoothing)
| NPMIResult |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.