language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/checkpointing/test_model_checkpoint.py
|
{
"start": 41440,
"end": 41673
}
|
class ____(Callback):
def on_validation_epoch_start(self, trainer, pl_module):
if not trainer.sanity_checking and trainer.current_epoch == 1:
raise RuntimeError("Trouble!")
|
TroubledCallbackOnValidationEpochStart
|
python
|
mahmoud__glom
|
glom/test/test_target_types.py
|
{
"start": 238,
"end": 7621
}
|
class ____(E):
pass
def test_types_leave_one_out():
ALL_TYPES = [A, B, C, D, E, F]
for cur_t in ALL_TYPES:
treg = TargetRegistry(register_default_types=False)
treg.register(object, get=lambda: object)
for t in ALL_TYPES:
if t is cur_t:
continue
treg.register(t, get=(lambda t: lambda: t)(t))
obj = cur_t()
assert treg.get_handler('get', obj)() == obj.__class__.mro()[1]
if cur_t is E:
assert treg.get_handler('get', obj)() is C # sanity check
return
def test_types_bare():
glommer = Glommer(register_default_types=False)
treg = glommer.scope[TargetRegistry]
assert treg._get_closest_type(object(), treg._op_type_tree.get('get', {})) is None
# test that bare glommers can't glom anything
with pytest.raises(UnregisteredTarget) as exc_info:
glommer.glom(object(), {'object_repr': '__class__.__name__'})
assert repr(exc_info.value) == "UnregisteredTarget('get', <type 'object'>, OrderedDict(), ('__class__',))"
assert str(exc_info.value).find(
"glom() called without registering any types for operation 'get'."
" see glom.register() or Glommer's constructor for details.") != -1
with pytest.raises(UnregisteredTarget, match='without registering') as exc_info:
glommer.glom([{'hi': 'hi'}], ['hi'])
assert not exc_info.value.type_map
glommer.register(object, get=getattr)
glommer.register(dict, get=dict.__getitem__, exact=True)
# check again that registering object for 'get' doesn't change the
# fact that we don't have iterate support yet
with pytest.raises(UnregisteredTarget) as exc_info:
glommer.glom({'test': [{'hi': 'hi'}]}, ('test', ['hi']))
# feel free to update the "(at ['test'])" part to improve path display
assert (
"target type 'list' not registered for 'iterate', "
"expected one of registered types: (dict)" in str(exc_info.value))
return
def test_invalid_register():
glommer = Glommer()
with pytest.raises(TypeError):
glommer.register(1)
return
def test_exact_register():
glommer = Glommer(register_default_types=False)
class BetterList(list):
pass
glommer.register(BetterList, iterate=iter, exact=True)
expected = [0, 2, 4]
value = glommer.glom(BetterList(range(3)), [lambda x: x * 2])
assert value == expected
with pytest.raises(UnregisteredTarget):
glommer.glom(list(range(3)), ['unused'])
return
def test_duck_register():
class LilRanger:
def __init__(self):
self.lil_list = list(range(5))
def __iter__(self):
return iter(self.lil_list)
glommer = Glommer(register_default_types=False)
target = LilRanger()
with pytest.raises(UnregisteredTarget):
float_range = glommer.glom(target, [float])
glommer.register(LilRanger)
float_range = glommer.glom(target, [float])
assert float_range == [0.0, 1.0, 2.0, 3.0, 4.0]
glommer = Glommer() # now with just defaults
float_range = glommer.glom(target, [float])
assert float_range == [0.0, 1.0, 2.0, 3.0, 4.0]
def test_bypass_getitem():
target = list(range(3)) * 3
with pytest.raises(PathAccessError):
glom.glom(target, 'count')
res = glom.glom(target, lambda list_obj: list_obj.count(1))
assert res == 3
def test_iter_set():
some_ints = set(range(5))
some_floats = glom.glom(some_ints, [float])
assert sorted(some_floats) == [0.0, 1.0, 2.0, 3.0, 4.0]
# now without defaults
glommer = Glommer(register_default_types=False)
glommer.register(set, iterate=iter)
some_floats = glom.glom(some_ints, [float])
assert sorted(some_floats) == [0.0, 1.0, 2.0, 3.0, 4.0]
def test_iter_str():
# check that strings are not iterable by default, one of the most
# common sources of bugs
glom_buddy = 'kurt'
with pytest.raises(UnregisteredTarget):
glom.glom(glom_buddy, {'name': [glom_buddy]})
# also check that someone can override this
glommer = Glommer()
glommer.register(str, iterate=iter)
res = glommer.glom(glom_buddy, {'name_chars_for_some_reason': [str]})
assert len(res['name_chars_for_some_reason']) == 4
# the better way, for any dissenter reading this
assert glom.glom(glom_buddy, {'name_chars': list}) == {'name_chars': ['k', 'u', 'r', 't']}
# and for the really passionate: how about making strings
# non-iterable and just giving them a .chars() method that returns
# a list of single-character strings.
def test_default_scope_register():
# just hit it to make sure it exists, it behaves exactly like Glommer.register
glom.register(type, exact=False)
def test_faulty_iterate():
glommer = Glommer()
def bad_iter(obj):
raise RuntimeError('oops')
glommer.register(str, iterate=bad_iter)
with pytest.raises(TypeError):
glommer.glom({'a': 'fail'}, ('a', {'chars': [str]}))
def test_faulty_op_registration():
treg = TargetRegistry()
with pytest.raises(TypeError, match="text name, not:"):
treg.register_op(None, len)
with pytest.raises(TypeError, match="callable, not:"):
treg.register_op('fake_op', object())
class NewType:
pass
def _autodiscover_raise(type_obj):
raise Exception('noperino')
with pytest.raises(TypeError, match="noperino"):
treg.register_op('fake_op', _autodiscover_raise)
assert 'fake_op' not in treg._op_auto_map
# check op with no autodiscovery
treg.register_op('lol', exact=True)
lol_type_map = treg.get_type_map('lol')
assert all([v is False for v in lol_type_map.values()])
# check op reregistration, this time not exact
assert not treg._op_type_tree.get('lol')
treg.register_op('lol', exact=False)
assert treg._op_type_tree.get('lol')
def _autodiscover_faulty_return(type_obj):
return 'hideeho'
with pytest.raises(TypeError, match="hideeho"):
treg.register_op('fake_op', _autodiscover_faulty_return)
def _autodiscover_sneaky(type_obj):
# works with default registrations, but fails later on sets and frozensets
if type_obj is set:
return 'this should have been False or a callable, but was intentionally a string'
if type_obj is frozenset:
raise ValueError('this should have been False or a callable, but was intentionally a ValueError')
return False
treg.register_op('sneak', _autodiscover_sneaky)
with pytest.raises(TypeError, match="intentionally a string"):
treg.register(set)
with pytest.raises(TypeError, match="intentionally a ValueError"):
treg.register(frozenset)
return
def test_reregister_type():
treg = TargetRegistry()
class NewType:
pass
treg.register(NewType, op=lambda obj: obj)
obj = NewType()
handler = treg.get_handler('op', obj)
assert handler(obj) == obj
# assert no change in reregistering same
treg.register(NewType, op=lambda obj: obj)
handler = treg.get_handler('op', obj)
assert handler(obj) == obj
# assert change in reregistering new
treg.register(NewType, op=lambda obj: obj.__class__.__name__)
handler = treg.get_handler('op', obj)
assert handler(obj) == 'NewType'
|
F
|
python
|
openai__openai-python
|
tests/api_resources/test_moderations.py
|
{
"start": 2059,
"end": 3916
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
moderation = await async_client.moderations.create(
input="I want to kill them.",
)
assert_matches_type(ModerationCreateResponse, moderation, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
moderation = await async_client.moderations.create(
input="I want to kill them.",
model="string",
)
assert_matches_type(ModerationCreateResponse, moderation, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.moderations.with_raw_response.create(
input="I want to kill them.",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
moderation = response.parse()
assert_matches_type(ModerationCreateResponse, moderation, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.moderations.with_streaming_response.create(
input="I want to kill them.",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
moderation = await response.parse()
assert_matches_type(ModerationCreateResponse, moderation, path=["response"])
assert cast(Any, response.is_closed) is True
|
TestAsyncModerations
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/redshift_cluster.py
|
{
"start": 1474,
"end": 4609
}
|
class ____(AwsBaseSensor[RedshiftHook]):
"""
Waits for a Redshift cluster to reach a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RedshiftClusterSensor`
:param cluster_identifier: The identifier for the cluster being pinged.
:param target_status: The cluster status desired.
:param deferrable: Run operator in the deferrable mode.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
"""
template_fields: Sequence[str] = aws_template_fields("cluster_identifier", "target_status")
aws_hook_class = RedshiftHook
def __init__(
self,
*,
cluster_identifier: str,
target_status: str = "available",
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.target_status = target_status
self.deferrable = deferrable
def poke(self, context: Context) -> bool:
current_status = self.hook.cluster_status(self.cluster_identifier)
self.log.info(
"Poked cluster %s for status '%s', found status '%s'",
self.cluster_identifier,
self.target_status,
current_status,
)
return current_status == self.target_status
def execute(self, context: Context) -> None:
if not self.deferrable:
super().execute(context=context)
elif not self.poke(context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=RedshiftClusterTrigger(
aws_conn_id=self.aws_conn_id,
cluster_identifier=self.cluster_identifier,
target_status=self.target_status,
poke_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
status = validated_event["status"]
if status == "error":
raise AirflowException(f"{validated_event['status']}: {validated_event['message']}")
if status == "success":
self.log.info("%s completed successfully.", self.task_id)
self.log.info("Cluster Identifier %s is in %s state", self.cluster_identifier, self.target_status)
|
RedshiftClusterSensor
|
python
|
walkccc__LeetCode
|
solutions/501. Find Mode in Binary Search Tree/501.py
|
{
"start": 0,
"end": 715
}
|
class ____:
def findMode(self, root: TreeNode | None) -> list[int]:
self.ans = []
self.pred = None
self.count = 0
self.maxCount = 0
def updateCount(root: TreeNode | None) -> None:
if self.pred and self.pred.val == root.val:
self.count += 1
else:
self.count = 1
if self.count > self.maxCount:
self.maxCount = self.count
self.ans = [root.val]
elif self.count == self.maxCount:
self.ans.append(root.val)
self.pred = root
def inorder(root: TreeNode | None) -> None:
if not root:
return
inorder(root.left)
updateCount(root)
inorder(root.right)
inorder(root)
return self.ans
|
Solution
|
python
|
huggingface__transformers
|
tests/models/bark/test_modeling_bark.py
|
{
"start": 6848,
"end": 11960
}
|
class ____:
def __init__(
self,
parent,
batch_size=3, # need batch_size != num_hidden_layers
seq_length=4,
is_training=False, # for now training is not supported
use_input_mask=True,
use_labels=True,
vocab_size=33,
output_vocab_size=33,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=15,
dropout=0.1,
window_size=256,
initializer_range=0.02,
n_codes_total=8, # for BarkFineModel
n_codes_given=1, # for BarkFineModel
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.output_vocab_size = output_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.window_size = window_size
self.initializer_range = initializer_range
self.bos_token_id = output_vocab_size - 1
self.eos_token_id = output_vocab_size - 1
self.pad_token_id = output_vocab_size - 1
self.n_codes_total = n_codes_total
self.n_codes_given = n_codes_given
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
inputs_dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
def get_config(self):
return BarkCoarseConfig(
vocab_size=self.vocab_size,
output_vocab_size=self.output_vocab_size,
hidden_size=self.hidden_size,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
window_size=self.window_size,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
config.output_vocab_size = 300
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BarkCoarseModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"logits"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
# test no attention_mask works
outputs = model(input_ids, use_cache=True)
_, past_key_values = outputs.to_tuple()
output_from_no_past = model(next_input_ids)["logits"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"]
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
|
BarkCoarseModelTester
|
python
|
walkccc__LeetCode
|
solutions/133. Clone Graph/133-2.py
|
{
"start": 0,
"end": 349
}
|
class ____:
def cloneGraph(self, node: 'Node') -> 'Node':
if not node:
return None
if node in self.map:
return self.map[node]
newNode = Node(node.val, [])
self.map[node] = newNode
for neighbor in node.neighbors:
self.map[node].neighbors.append(self.cloneGraph(neighbor))
return newNode
map = {}
|
Solution
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/__init__.py
|
{
"start": 17861,
"end": 17922
}
|
class ____(CPPObject):
object_type = 'union'
|
CPPUnionObject
|
python
|
huggingface__transformers
|
tests/models/mvp/test_modeling_mvp.py
|
{
"start": 15694,
"end": 21146
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": MvpModel,
"fill-mask": MvpForConditionalGeneration,
"question-answering": MvpForQuestionAnswering,
"summarization": MvpForConditionalGeneration,
"text-classification": MvpForSequenceClassification,
"text-generation": MvpForCausalLM,
"text2text-generation": MvpForConditionalGeneration,
"translation": MvpForConditionalGeneration,
"zero-shot": MvpForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def setUp(self):
self.model_tester = MvpModelTester(self)
self.config_tester = ConfigTester(self, config_class=MvpConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# MvpForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
|
MvpModelTest
|
python
|
huggingface__transformers
|
src/transformers/models/mistral3/modular_mistral3.py
|
{
"start": 1180,
"end": 1230
}
|
class ____(MistralRMSNorm):
pass
|
Mistral3RMSNorm
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/template.py
|
{
"start": 23762,
"end": 31941
}
|
class ____(Template):
"""Wrap a function to aid in variable sharing in Eager mode.
Templates are functions that create variables the first time they are called
and reuse them thereafter. See `make_template` for full documentation.
Note: By default, the full variable scope is captured at the time of first
call. If `create_scope_now` is passed as True to the constructor, the full
scope will be captured there, but no variables will be created until the first
call.
"""
def __init__(self,
name,
func,
create_scope_now=False,
custom_getter=None,
create_graph_function=False):
"""Creates a template for the given function.
Args:
name: A name for the scope created by this template. The name will be made
unique by appending `_N` to the it (see how
`tf.compat.v1.variable_scope` treats the `default_name` for details).
func: The function to apply each time.
create_scope_now: Whether to create the scope at Template construction
time, rather than first call. Defaults to false. Creating the scope at
construction time may be more convenient if the template is passed
through much lower level code, and you want to be sure of the scope name
without knowing exactly where it will be first called. If set to True,
the scope will be created in the constructor, and all subsequent times
in `__call__`, leading to a trailing numeral being added to the names of
all created Tensors. If set to False, the scope will be created at the
first call location.
custom_getter: optional custom getter to pass to `variable_scope()`
create_graph_function: When True, `func` will be executed as a graph
function. Enabling this flag allows the caller to reap the performance
benefits associated with executing graphs, at the cost of sacrificing
debuggability; however, not all Python functions can be compiled into
graph functions. See the documentation for `function.defun` for details.
Raises:
RuntimeError: if eager execution is not enabled.
"""
if not context.executing_eagerly():
raise RuntimeError(
"{} objects can only be used when eager execution is enabled, use "
"tf.Template for graph construction".format(type(self)))
super(EagerTemplate, self).__init__(name, func, create_scope_now, None,
custom_getter, create_graph_function)
if self._variable_scope is not None:
variable_scope_name = self._variable_scope.name
else:
# Defer setting the variable scope name until the variable scope
# is created in __call__.
variable_scope_name = None
self._template_store = _EagerTemplateVariableStore(variable_scope_name)
self._variable_scope_context_manager = None
def _call_func(self, args, kwargs):
try:
vars_at_start = self._template_store.variables()
trainable_at_start = self._template_store.trainable_variables()
if self._variables_created:
result = self._func(*args, **kwargs)
else:
# The first time we run, restore variables if necessary (via
# Trackable).
with trackable_util.capture_dependencies(template=self):
result = self._func(*args, **kwargs)
if self._variables_created:
# Variables were previously created, implying this is not the first
# time the template has been called. Check to make sure that no new
# trainable variables were created this time around.
trainable_variables = self._template_store.trainable_variables()
# If a variable that we intend to train is created as a side effect
# of creating a template, then that is almost certainly an error.
if len(trainable_at_start) != len(trainable_variables):
raise ValueError(
"Trainable variable created when calling a template "
"after the first time, perhaps you used tf.Variable "
"when you meant tf.get_variable: %s" % list(
object_identity.ObjectIdentitySet(trainable_variables) -
object_identity.ObjectIdentitySet(trainable_at_start)))
# Non-trainable tracking variables are a legitimate reason why a new
# variable would be created, but it is a relatively advanced use-case,
# so log it.
variables = self._template_store.variables()
if len(vars_at_start) != len(variables):
logging.info(
"New variables created when calling a template after "
"the first time, perhaps you used tf.Variable when you "
"meant tf.get_variable: %s",
list(
object_identity.ObjectIdentitySet(variables) -
object_identity.ObjectIdentitySet(vars_at_start)))
else:
self._variables_created = True
return result
except Exception as exc:
# Reraise the exception, but append the original definition to the
# trace.
args = exc.args
if not args:
arg0 = ""
else:
arg0 = args[0]
trace = "".join(
_skip_common_stack_elements(self._stacktrace,
traceback.format_stack()))
arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace)
new_args = [arg0]
new_args.extend(args[1:])
exc.args = tuple(new_args)
raise
def __call__(self, *args, **kwargs):
# In both branches below, the template store is installed as default after
# the variable scope is opened in order to ensure that templates nested at
# the same level correctly uniquify lower variable scope names.
if self._variable_scope:
# Create a cache for the variable scope context manager the first time
# around so that we don't have to keep recreating it.
if not self._variable_scope_context_manager:
self._variable_scope_context_manager = variable_scope.variable_scope(
self._variable_scope, reuse=variable_scope.AUTO_REUSE)
with self._variable_scope_context_manager:
with self._template_store.as_default():
return self._call_func(args, kwargs)
else:
# The scope was not created at construction time, so create it here.
# Subsequent calls should reuse variables.
with variable_scope.variable_scope(
self._unique_name, self._name,
custom_getter=self._custom_getter) as vs:
self._variable_scope = vs
# Because the scope was not created at construction time, the template
# store's variable scope name is unset; set it here.
self._template_store.set_variable_scope_name(vs.name)
with self._template_store.as_default():
return self._call_func(args, kwargs)
@property
def variables(self):
"""Returns the list of variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.variables()
@property
def trainable_variables(self):
"""Returns the list of trainable variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.trainable_variables()
@property
def non_trainable_variables(self):
"""Returns the list of non-trainable variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self._template_store.non_trainable_variables()
@property
def global_variables(self):
"""Returns the list of global variables created by the Template."""
# Currently there is no local variable in Eager mode.
if not self._variables_created:
return []
return self.variables
@property
def local_variables(self):
"""Returns the list of global variables created by the Template."""
# Currently there is no local variable in Eager mode.
return []
|
EagerTemplate
|
python
|
kamyu104__LeetCode-Solutions
|
Python/check-if-matrix-is-x-matrix.py
|
{
"start": 39,
"end": 309
}
|
class ____(object):
def checkXMatrix(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
return all((i-j == 0 or i+j == len(grid)-1) == (grid[i][j] != 0) for i in xrange(len(grid)) for j in xrange(len(grid[0])))
|
Solution
|
python
|
django__django
|
django/db/migrations/operations/fields.py
|
{
"start": 9603,
"end": 12787
}
|
class ____(FieldOperation):
"""Rename a field on the model. Might affect db_column too."""
category = OperationCategory.ALTERATION
def __init__(self, model_name, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
super().__init__(model_name, old_name)
@cached_property
def old_name_lower(self):
return self.old_name.lower()
@cached_property
def new_name_lower(self):
return self.new_name.lower()
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"old_name": self.old_name,
"new_name": self.new_name,
}
return (self.__class__.__name__, [], kwargs)
def state_forwards(self, app_label, state):
state.rename_field(
app_label, self.model_name_lower, self.old_name, self.new_name
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.old_name),
to_model._meta.get_field(self.new_name),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.model_name)
schema_editor.alter_field(
from_model,
from_model._meta.get_field(self.new_name),
to_model._meta.get_field(self.old_name),
)
def describe(self):
return "Rename field %s on %s to %s" % (
self.old_name,
self.model_name,
self.new_name,
)
@property
def migration_name_fragment(self):
return "rename_%s_%s_%s" % (
self.old_name_lower,
self.model_name_lower,
self.new_name_lower,
)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
name.lower() == self.old_name_lower or name.lower() == self.new_name_lower
)
def reduce(self, operation, app_label):
if (
isinstance(operation, RenameField)
and self.is_same_model_operation(operation)
and self.new_name_lower == operation.old_name_lower
):
return [replace(self, new_name=operation.new_name)]
# Skip `FieldOperation.reduce` as we want to run `references_field`
# against self.old_name and self.new_name.
return super(FieldOperation, self).reduce(operation, app_label) or not (
operation.references_field(self.model_name, self.old_name, app_label)
or operation.references_field(self.model_name, self.new_name, app_label)
)
|
RenameField
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/filters.py
|
{
"start": 26892,
"end": 27804
}
|
class ____(PrefectOperatorFilterBaseModel):
"""Filter by `TaskRun.flow_run_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of task run flow run ids to include"
)
is_null_: Optional[bool] = Field(
default=False, description="Filter for task runs with None as their flow run id"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.is_null_ is True:
filters.append(db.TaskRun.flow_run_id.is_(None))
elif self.is_null_ is False and self.any_ is None:
filters.append(db.TaskRun.flow_run_id.is_not(None))
else:
if self.any_ is not None:
filters.append(db.TaskRun.flow_run_id.in_(self.any_))
return filters
|
TaskRunFilterFlowRunId
|
python
|
getsentry__sentry
|
src/sentry/replays/lib/new_query/fields.py
|
{
"start": 6822,
"end": 7039
}
|
class ____(ColumnField[int]):
@property
def expression(self) -> Function:
return Function(
"sum", parameters=[Function("length", parameters=[Column(self.column_name)])]
)
|
SumLengthField
|
python
|
keras-team__keras
|
guides/making_new_layers_and_models_via_subclassing.py
|
{
"start": 7126,
"end": 7334
}
|
class ____(keras.layers.Layer):
...
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
```
And this would be the equivalent PyTorch-specific layer:
```python
import torch
|
Linear
|
python
|
allegroai__clearml
|
clearml/utilities/plotlympl/renderer.py
|
{
"start": 708,
"end": 32427
}
|
class ____(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
def __init__(self) -> None:
"""Initialize PlotlyRenderer obj.
PlotlyRenderer obj is called on by an Exporter object to draw
matplotlib objects like figures, axes, text, etc.
All class attributes are listed here in the __init__ method.
"""
self.plotly_fig = dict(data=[], layout={})
self.mpl_fig = None
self.current_mpl_ax = None
self.bar_containers = None
self.current_bars = []
self.current_bars_names = []
self.axis_ct = 0
self.x_is_mpl_date = False
self.mpl_x_bounds = (0, 1)
self.mpl_y_bounds = (0, 1)
self.msg = "Initialized PlotlyRenderer\n"
def open_figure(self, fig: Any, props: dict) -> None:
"""Creates a new figure by beginning to fill out layout dict.
The 'autosize' key is set to false so that the figure will mirror
sizes set by mpl. The 'hovermode' key controls what shows up when you
mouse around a figure in plotly, it's set to show the 'closest' point.
Positional agurments:
fig -- a matplotlib.figure.Figure object.
props.keys(): [
'figwidth',
'figheight',
'dpi'
]
"""
self.msg += "Opening figure\n"
self.mpl_fig = fig
self.plotly_fig["layout"] = dict(
width=int(props["figwidth"] * props["dpi"]),
height=int(props["figheight"] * props["dpi"]),
autosize=False,
hovermode="closest",
)
self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
margin = dict(
l=int(self.mpl_x_bounds[0] * self.plotly_fig["layout"]["width"]),
r=int((1 - self.mpl_x_bounds[1]) * self.plotly_fig["layout"]["width"]),
t=int((1 - self.mpl_y_bounds[1]) * self.plotly_fig["layout"]["height"]),
b=int(self.mpl_y_bounds[0] * self.plotly_fig["layout"]["height"]),
pad=0,
)
self.plotly_fig["layout"]["margin"] = margin
def close_figure(self, fig: Any) -> None:
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig["layout"]["showlegend"] = False
self.msg += "Closing figure\n"
def open_axes(self, ax: Any, props: dict) -> None:
"""Setup a new axes object (subplot in plotly).
Plotly stores information about subplots in different 'xaxis' and
'yaxis' objects which are numbered. These are just dictionaries
included in the layout dictionary. This function takes information
from the Exporter, fills in appropriate dictionary entries,
and updates the layout dictionary. PlotlyRenderer keeps track of the
number of plots by incrementing the axis_ct attribute.
Setting the proper plot domain in plotly is a bit tricky. Refer to
the documentation for mpltools.convert_x_domain and
mpltools.convert_y_domain.
Positional arguments:
ax -- an mpl axes object. This will become a subplot in plotly.
props.keys() -- [
'axesbg', (background color for axes obj)
'axesbgalpha', (alpha, or opacity for background)
'bounds', ((x0, y0, width, height) for axes)
'dynamic', (zoom/pan-able?)
'axes', (list: [xaxis, yaxis])
'xscale', (log, linear, or date)
'yscale',
'xlim', (range limits for x)
'ylim',
'xdomain' (xdomain=xlim, unless it's a date)
'ydomain'
]
"""
self.msg += " Opening axes\n"
self.current_mpl_ax = ax
self.bar_containers = [c for c in ax.containers if c.__class__.__name__ == "BarContainer"] # empty is OK
self.current_bars = []
# set defaults in axes
xaxis = dict(anchor="y{0}".format(self.axis_ct or ""), zeroline=False, ticks="inside")
yaxis = dict(anchor="x{0}".format(self.axis_ct or ""), zeroline=False, ticks="inside")
zaxis = dict(anchor="x{0}".format(self.axis_ct or ""), zeroline=False, ticks="inside")
# update defaults with things set in mpl
mpl_xaxis, mpl_yaxis, mpl_zaxis = mpltools.prep_xyz_axis(
ax=ax, props=props, x_bounds=self.mpl_x_bounds, y_bounds=self.mpl_y_bounds
)
xaxis.update(mpl_xaxis)
yaxis.update(mpl_yaxis)
zaxis.update(mpl_zaxis)
bottom_spine = mpltools.get_spine_visible(ax, "bottom")
top_spine = mpltools.get_spine_visible(ax, "top")
left_spine = mpltools.get_spine_visible(ax, "left")
right_spine = mpltools.get_spine_visible(ax, "right")
xaxis["mirror"] = mpltools.get_axis_mirror(bottom_spine, top_spine)
yaxis["mirror"] = mpltools.get_axis_mirror(left_spine, right_spine)
xaxis["showline"] = bottom_spine
yaxis["showline"] = top_spine
# put axes in our figure
self.plotly_fig["layout"]["xaxis{0}".format(self.axis_ct or "")] = xaxis
self.plotly_fig["layout"]["yaxis{0}".format(self.axis_ct or "")] = yaxis
if mpl_zaxis:
self.plotly_fig["layout"]["zaxis{0}".format(self.axis_ct or "")] = zaxis
# let all subsequent dates be handled properly if required
if xaxis.get("type") == "date":
self.x_is_mpl_date = True
self.axis_ct += 1
def close_axes(self, ax: Any) -> None:
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_bars variable should be empty unless a bar
chart has been created.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
if self.current_bars:
# noinspection PyBroadException
try:
self.current_bars_names = [n.get_text() for n in ax.legend().texts]
except Exception:
pass
self.draw_bars(self.current_bars)
self.msg += " Closing axes\n"
self.x_is_mpl_date = False
def draw_bars(self, bars: List[Dict[str, Any]]) -> None:
# sort bars according to bar containers
mpl_traces = []
for container in self.bar_containers:
mpl_traces.append([bar_props for bar_props in self.current_bars if bar_props["mplobj"] in container])
for i, trace in enumerate(mpl_traces):
self.draw_bar(
trace,
self.current_bars_names[i] if i < len(self.current_bars_names) else None,
)
def draw_bar(
self,
coll: List[Dict[str, Union[float, str, int]]],
name: Optional[str] = None,
) -> None:
"""Draw a collection of similar patches as a bar chart.
After bars are sorted, an appropriate data dictionary must be created
to tell plotly about this data. Just like draw_line or draw_markers,
draw_bar translates patch/path information into something plotly
understands.
Positional arguments:
patch_coll -- a collection of patches to be drawn as a bar chart.
"""
tol = 1e-10
trace = [mpltools.make_bar(**bar_props) for bar_props in coll]
widths = [bar_props["x1"] - bar_props["x0"] for bar_props in trace]
heights = [bar_props["y1"] - bar_props["y0"] for bar_props in trace]
vertical = abs(sum(widths[0] - widths[iii] for iii in range(len(widths)))) < tol
horizontal = abs(sum(heights[0] - heights[iii] for iii in range(len(heights)))) < tol
if (vertical and horizontal) or (not vertical and not horizontal):
# Check for monotonic x. Can't both be true!
x_zeros = [bar_props["x0"] for bar_props in trace]
if all((x_zeros[iii + 1] > x_zeros[iii] for iii in range(len(x_zeros[:-1])))):
orientation = "v"
else:
orientation = "h"
elif vertical:
orientation = "v"
else:
orientation = "h"
if orientation == "v":
self.msg += " Attempting to draw a vertical bar chart\n"
old_heights = [bar_props["y1"] for bar_props in trace]
for bar in trace:
bar["y0"], bar["y1"] = 0, bar["y1"] - bar["y0"]
new_heights = [bar_props["y1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_heights, new_heights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "x"
x = [bar["x0"] + (bar["x1"] - bar["x0"]) / 2 for bar in trace]
y = [bar["y1"] for bar in trace]
bar_gap = mpltools.get_bar_gap([bar["x0"] for bar in trace], [bar["x1"] for bar in trace])
if self.x_is_mpl_date:
x = [bar["x0"] for bar in trace]
formatter = self.current_mpl_ax.get_xaxis().get_major_formatter().__class__.__name__
x = mpltools.mpl_dates_to_datestrings(x, formatter)
else:
self.msg += " Attempting to draw a horizontal bar chart\n"
old_rights = [bar_props["x1"] for bar_props in trace]
for bar in trace:
bar["x0"], bar["x1"] = 0, bar["x1"] - bar["x0"]
new_rights = [bar_props["x1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_rights, new_rights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "y"
x = [bar["x1"] for bar in trace]
y = [bar["y0"] + (bar["y1"] - bar["y0"]) / 2 for bar in trace]
bar_gap = mpltools.get_bar_gap([bar["y0"] for bar in trace], [bar["y1"] for bar in trace])
bar = dict(
type="bar",
orientation=orientation,
x=x,
y=y,
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
opacity=trace[0]["alpha"], # TODO: get all alphas if array?
marker=dict(
color=trace[0]["facecolor"], # TODO: get all
line=dict(width=trace[0]["edgewidth"]),
),
) # TODO ditto
if name:
bar["name"] = name
if len(bar["x"]) >= 1:
self.msg += " Heck yeah, I drew that bar chart\n"
self.plotly_fig["data"].append(bar)
if bar_gap is not None:
self.plotly_fig["layout"]["bargap"] = bar_gap
else:
self.msg += " Bar chart not drawn\n"
warnings.warn("found box chart data with length < 1, assuming data redundancy, not plotting.")
def draw_marked_line(self, **props: Any) -> None:
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker = {}, {}
if props["linestyle"] and props["markerstyle"]:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props["linestyle"]:
self.msg += "... with just lines\n"
mode = "lines"
elif props["markerstyle"]:
self.msg += "... with just markers\n"
mode = "markers"
if props["linestyle"]:
color = mpltools.merge_color_and_opacity(props["linestyle"]["color"], props["linestyle"]["alpha"])
# print(mpltools.convert_dash(props['linestyle']['dasharray']))
line = dict(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
if props["markerstyle"]:
marker = dict(
opacity=props["markerstyle"]["alpha"],
color=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
if props["coordinates"] == "data":
marked_line = dict(
type="scatter",
mode=mode,
name=(str(props["label"]) if isinstance(props["label"], six.string_types) else props["label"]),
x=props["data"][0]
if props.get("type") == "collection" and props.get("is_3d")
else [xy_pair[0] for xy_pair in props["data"]],
y=props["data"][1]
if props.get("type") == "collection" and props.get("is_3d")
else [xy_pair[1] for xy_pair in props["data"]],
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
line=line,
marker=marker,
)
if props.get("is_3d"):
marked_line["z"] = (
props["data"][2]
if props.get("type") == "collection"
else [xyz_tuple[2] for xyz_tuple in props["data"]]
)
marked_line["type"] = "scatter3d"
marked_line["zaxis"] = "z{0}".format(self.axis_ct)
if self.x_is_mpl_date:
formatter = self.current_mpl_ax.get_xaxis().get_major_formatter().__class__.__name__
marked_line["x"] = mpltools.mpl_dates_to_datestrings(marked_line["x"], formatter)
self.plotly_fig["data"].append(marked_line)
self.msg += " Heck yeah, I drew that line\n"
else:
self.msg += " Line didn't have 'data' coordinates, not drawing\n"
warnings.warn(
"Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!"
)
def draw_image(self, **props: Any) -> None:
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn(
"Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!"
)
def draw_path_collection(self, ax: Any, **props: Any) -> None:
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props["offset_coordinates"] == "data":
markerstyle = mpltools.get_markerstyle_from_collection(props)
scatter_props = {
"coordinates": "data",
"data": props["offsets"],
"label": None,
"markerstyle": markerstyle,
"linestyle": None,
"type": "collection",
"is_3d": "3d" in str(type(ax)).split(".")[-1].lower(),
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', not drawing\n"
warnings.warn(
"Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates"
)
def draw_path(self, **props: Any) -> None:
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
'data', (a list of verticies for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(self.current_mpl_ax.containers, **props)
if is_bar:
self.current_bars += [props]
elif mpltools.is_fancy_bbox(**props):
self.current_bars += [props]
else:
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn("I found a path object that I don't think is part of a bar chart. Ignoring.")
def draw_text(self, **props: Any) -> None:
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
if not mpltools.check_corners(props["mplobj"], self.mpl_fig):
warnings.warn(
"Looks like the annotation(s) you are trying \n"
"to draw lies/lay outside the given figure size.\n\n"
"Therefore, the resulting Plotly figure may not be \n"
"large enough to view the full text. To adjust \n"
"the size of the figure, use the 'width' and \n"
"'height' keys in the Layout object. Alternatively,\n"
"use the Margin object to adjust the figure's margins."
)
align = props["mplobj"]._multialignment
if not align:
align = props["style"]["halign"] # mpl default
if "annotations" not in self.plotly_fig["layout"]:
self.plotly_fig["layout"]["annotations"] = []
if props["text_type"] == "xlabel":
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props["text_type"] == "ylabel":
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props["text_type"] == "title":
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
if props["coordinates"] != "data":
self.msg += " Text object isn't linked to 'data' coordinates\n"
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
else:
self.msg += " Text object is linked to 'data' coordinates\n"
x, y = props["position"]
axis_ct = self.axis_ct
xaxis = self.plotly_fig["layout"]["xaxis{0}".format(axis_ct or "")]
yaxis = self.plotly_fig["layout"]["yaxis{0}".format(axis_ct or "")]
if xaxis["range"][0] < x < xaxis["range"][1] and yaxis["range"][0] < y < yaxis["range"][1]:
xref = "x{0}".format(self.axis_ct)
yref = "y{0}".format(self.axis_ct)
else:
self.msg += " Text object is outside plotting area, making 'paper' reference.\n"
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
annotation = dict(
text=(str(props["text"]) if isinstance(props["text"], six.string_types) else props["text"]),
opacity=props["style"]["alpha"],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=dict(color=props["style"]["color"], size=props["style"]["fontsize"]),
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props: Any) -> None:
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += " More than one subplot, adding title as annotation\n"
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
annotation = dict(
text=props["text"],
font=dict(color=props["style"]["color"], size=props["style"]["fontsize"]),
xref="paper",
yref="paper",
x=x,
y=y,
xanchor="center",
yanchor="bottom",
showarrow=False, # no arrow for a title!
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
else:
self.msg += " Only one subplot found, adding as a plotly title\n"
self.plotly_fig["layout"]["title"] = props["text"]
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"]["titlefont"] = titlefont
def draw_xlabel(self, **props: Any) -> None:
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = "xaxis{0}".format(self.axis_ct or "")
# bugfix: add on last axis, self.axis_ct-1
if axis_key not in self.plotly_fig["layout"]:
axis_key = "xaxis{0}".format(max(0, self.axis_ct - 1) or "")
self.plotly_fig["layout"][axis_key]["title"] = str(props["text"])
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def draw_ylabel(self, **props: Any) -> None:
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = "yaxis{0}".format(self.axis_ct or "")
# bugfix: add on last axis, self.axis_ct-1
if axis_key not in self.plotly_fig["layout"]:
axis_key = "yaxis{0}".format(max(0, self.axis_ct - 1) or "")
self.plotly_fig["layout"][axis_key]["title"] = props["text"]
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def resize(self) -> None:
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ["width", "height", "autosize", "margin"]:
try:
del self.plotly_fig["layout"][key]
except (KeyError, AttributeError):
pass
def strip_style(self) -> None:
self.msg += "Stripping mpl style is no longer supported\n"
|
PlotlyRenderer
|
python
|
PyCQA__pylint
|
tests/functional/n/not_callable.py
|
{
"start": 253,
"end": 295
}
|
class ____:
"""callable object"""
|
Correct
|
python
|
django__django
|
tests/i18n/test_compilation.py
|
{
"start": 13712,
"end": 13995
}
|
class ____(ProjectAndAppTests):
def test_app_locale_compiled(self):
call_command("compilemessages", locale=[self.LOCALE], verbosity=0)
self.assertTrue(os.path.exists(self.PROJECT_MO_FILE))
self.assertTrue(os.path.exists(self.APP_MO_FILE))
|
AppCompilationTest
|
python
|
openai__openai-python
|
src/openai/types/beta/chatkit/chatkit_thread.py
|
{
"start": 1058,
"end": 1683
}
|
class ____(BaseModel):
id: str
"""Identifier of the thread."""
created_at: int
"""Unix timestamp (in seconds) for when the thread was created."""
object: Literal["chatkit.thread"]
"""Type discriminator that is always `chatkit.thread`."""
status: Status
"""Current status for the thread. Defaults to `active` for newly created threads."""
title: Optional[str] = None
"""Optional human-readable title for the thread.
Defaults to null when no title has been generated.
"""
user: str
"""Free-form string that identifies your end user who owns the thread."""
|
ChatKitThread
|
python
|
redis__redis-py
|
tests/test_asyncio/test_connection_pool.py
|
{
"start": 28504,
"end": 36959
}
|
class ____:
interval = 60
@pytest_asyncio.fixture()
async def r(self, create_redis):
redis = await create_redis(health_check_interval=self.interval)
yield redis
await redis.flushall()
def assert_interval_advanced(self, connection):
diff = connection.next_health_check - asyncio.get_running_loop().time()
assert self.interval >= diff > (self.interval - 1)
async def test_health_check_runs(self, r):
if r.connection:
r.connection.next_health_check = asyncio.get_running_loop().time() - 1
await r.connection.check_health()
self.assert_interval_advanced(r.connection)
async def test_arbitrary_command_invokes_health_check(self, r):
# invoke a command to make sure the connection is entirely setup
if r.connection:
await r.get("foo")
r.connection.next_health_check = asyncio.get_running_loop().time()
with mock.patch.object(
r.connection, "send_command", wraps=r.connection.send_command
) as m:
await r.get("foo")
m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(r.connection)
async def test_arbitrary_command_advances_next_health_check(self, r):
if r.connection:
await r.get("foo")
next_health_check = r.connection.next_health_check
# ensure that the event loop's `time()` advances a bit
await asyncio.sleep(0.001)
await r.get("foo")
assert next_health_check < r.connection.next_health_check
async def test_health_check_not_invoked_within_interval(self, r):
if r.connection:
await r.get("foo")
with mock.patch.object(
r.connection, "send_command", wraps=r.connection.send_command
) as m:
await r.get("foo")
ping_call_spec = (("PING",), {"check_health": False})
assert ping_call_spec not in m.call_args_list
async def test_health_check_in_pipeline(self, r):
async with r.pipeline(transaction=False) as pipe:
pipe.connection = await pipe.connection_pool.get_connection()
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
responses = await pipe.set("foo", "bar").get("foo").execute()
m.assert_any_call("PING", check_health=False)
assert responses == [True, b"bar"]
async def test_health_check_in_transaction(self, r):
async with r.pipeline(transaction=True) as pipe:
pipe.connection = await pipe.connection_pool.get_connection()
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
responses = await pipe.set("foo", "bar").get("foo").execute()
m.assert_any_call("PING", check_health=False)
assert responses == [True, b"bar"]
async def test_health_check_in_watched_pipeline(self, r):
await r.set("foo", "bar")
async with r.pipeline(transaction=False) as pipe:
pipe.connection = await pipe.connection_pool.get_connection()
pipe.connection.next_health_check = 0
with mock.patch.object(
pipe.connection, "send_command", wraps=pipe.connection.send_command
) as m:
await pipe.watch("foo")
# the health check should be called when watching
m.assert_called_with("PING", check_health=False)
self.assert_interval_advanced(pipe.connection)
assert await pipe.get("foo") == b"bar"
# reset the mock to clear the call list and schedule another
# health check
m.reset_mock()
pipe.connection.next_health_check = 0
pipe.multi()
responses = await pipe.set("foo", "not-bar").get("foo").execute()
assert responses == [True, b"not-bar"]
m.assert_any_call("PING", check_health=False)
async def test_health_check_in_pubsub_before_subscribe(self, r):
"""A health check happens before the first [p]subscribe"""
p = r.pubsub()
p.connection = await p.connection_pool.get_connection()
p.connection.next_health_check = 0
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
assert not p.subscribed
await p.subscribe("foo")
# the connection is not yet in pubsub mode, so the normal
# ping/pong within connection.send_command should check
# the health of the connection
m.assert_any_call("PING", check_health=False)
self.assert_interval_advanced(p.connection)
subscribe_message = await wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
async def test_health_check_in_pubsub_after_subscribed(self, r):
"""
Pubsub can handle a new subscribe when it's time to check the
connection health
"""
p = r.pubsub()
p.connection = await p.connection_pool.get_connection()
p.connection.next_health_check = 0
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
await p.subscribe("foo")
subscribe_message = await wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# because we weren't subscribed when sending the subscribe
# message to 'foo', the connection's standard check_health ran
# prior to subscribing.
m.assert_any_call("PING", check_health=False)
p.connection.next_health_check = 0
m.reset_mock()
await p.subscribe("bar")
# the second subscribe issues exactly only command (the subscribe)
# and the health check is not invoked
m.assert_called_once_with("SUBSCRIBE", "bar", check_health=False)
# since no message has been read since the health check was
# reset, it should still be 0
assert p.connection.next_health_check == 0
subscribe_message = await wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
assert await wait_for_message(p) is None
# now that the connection is subscribed, the pubsub health
# check should have taken over and include the HEALTH_CHECK_MESSAGE
m.assert_any_call("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
async def test_health_check_in_pubsub_poll(self, r):
"""
Polling a pubsub connection that's subscribed will regularly
check the connection's health.
"""
p = r.pubsub()
p.connection = await p.connection_pool.get_connection()
with mock.patch.object(
p.connection, "send_command", wraps=p.connection.send_command
) as m:
await p.subscribe("foo")
subscribe_message = await wait_for_message(p)
assert subscribe_message["type"] == "subscribe"
self.assert_interval_advanced(p.connection)
# polling the connection before the health check interval
# doesn't result in another health check
m.reset_mock()
next_health_check = p.connection.next_health_check
assert await wait_for_message(p) is None
assert p.connection.next_health_check == next_health_check
m.assert_not_called()
# reset the health check and poll again
# we should not receive a pong message, but the next_health_check
# should be advanced
p.connection.next_health_check = 0
assert await wait_for_message(p) is None
m.assert_called_with("PING", p.HEALTH_CHECK_MESSAGE, check_health=False)
self.assert_interval_advanced(p.connection)
|
TestHealthCheck
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/vision.py
|
{
"start": 56728,
"end": 60451
}
|
class ____(GoogleCloudBaseOperator):
"""
Detect Document Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionTextDetectOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param language_hints: List of languages to use for TEXT_DETECTION.
In most cases, an empty value yields the best results since it enables automatic language detection.
For languages based on the Latin alphabet, setting language_hints is not needed.
:param web_detection_params: Parameters for web detection.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_document_detect_text_set_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
) # Iterable[str]
# [END vision_document_detect_text_set_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
language_hints: str | list[str] | None = None,
web_detection_params: dict | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = prepare_additional_parameters(
additional_properties=additional_properties,
language_hints=language_hints,
web_detection_params=web_detection_params,
)
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.document_text_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
|
CloudVisionTextDetectOperator
|
python
|
django-import-export__django-import-export
|
import_export/mixins.py
|
{
"start": 10241,
"end": 11550
}
|
class ____(ExportViewMixin, FormView):
# Deprecated, and will be removed in a future release (see #1666)
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
warn(
"ExportViewFormMixin is deprecated and will be removed "
"in a future release.",
DeprecationWarning,
stacklevel=2,
)
def form_valid(self, form):
formats = self.get_export_formats()
file_format = formats[int(form.cleaned_data["format"])]()
if hasattr(self, "get_filterset"):
queryset = self.get_filterset(self.get_filterset_class()).qs
else:
queryset = self.get_queryset()
export_data = self.get_export_data(file_format, queryset)
content_type = file_format.get_content_type()
# Django 1.7 uses the content_type kwarg instead of mimetype
try:
response = HttpResponse(export_data, content_type=content_type)
except TypeError:
response = HttpResponse(export_data, mimetype=content_type)
response["Content-Disposition"] = 'attachment; filename="{}"'.format(
self.get_export_filename(file_format),
)
post_export.send(sender=None, model=self.model)
return response
|
ExportViewFormMixin
|
python
|
sympy__sympy
|
sympy/codegen/ast.py
|
{
"start": 56396,
"end": 57412
}
|
class ____(FunctionCall):
""" Represents a call to a function with keyword arguments in the code.
Parameters
==========
name : str
function_args : Tuple
keyword_args : dict
Dictionary mapping parameter names to their values
Examples
========
>>> from sympy.codegen.ast import KeywordFunctionCall, String
>>> from sympy.core.containers import Tuple
>>> from sympy import fcode
>>> fcall = KeywordFunctionCall(String('reshape'), Tuple(String('array'), String('shape')), {'order': String('order_array')})
>>> print(fcode(fcall, source_format='free'))
reshape(array, shape, order=order_array)
"""
__slots__ = ('keyword_args',)
_fields = ('name', 'function_args', 'keyword_args') # type: ignore
defaults = {'keyword_args': {}}
@staticmethod
def _construct_keyword_args(kwargs):
from sympy.core.containers import Dict
if kwargs is None:
return Dict({})
return Dict(kwargs)
|
KeywordFunctionCall
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_with.py
|
{
"start": 22856,
"end": 24892
}
|
class ____(__TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
with torch._dynamo.error_on_graph_break(False):
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
with torch._dynamo.error_on_graph_break(False):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
with torch._dynamo.error_on_graph_break(False):
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
def testWithExtendedTargets(self):
with nullcontext(range(1, 5)) as (a, *b, c):
self.assertEqual(a, 1)
self.assertEqual(b, [2, 3])
self.assertEqual(c, 4)
|
AssignmentTargetTestCase
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_linprog.py
|
{
"start": 70451,
"end": 80060
}
|
class ____(LinprogCommonTests):
def test_callback(self):
# this is the problem from test_callback
def cb(res):
return None
c = np.array([-3, -2])
A_ub = [[2, 1], [1, 1], [1, 0]]
b_ub = [10, 8, 4]
assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub,
callback=cb, method=self.method)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method)
_assert_success(res, desired_fun=-18.0, desired_x=[2, 6])
@pytest.mark.parametrize("options",
[{"maxiter": -1},
{"disp": -1},
{"presolve": -1},
{"time_limit": -1},
{"dual_feasibility_tolerance": -1},
{"primal_feasibility_tolerance": -1},
{"ipm_optimality_tolerance": -1},
{"simplex_dual_edge_weight_strategy": "ekki"},
])
def test_invalid_option_values(self, options):
def f(options):
linprog(1, method=self.method, options=options)
options.update(self.options)
with pytest.warns(OptimizeWarning):
f(options=options)
def test_crossover(self):
A_eq, b_eq, c, _, _ = magic_square(4, rng=np.random.default_rng(2212392))
bounds = (0, 1)
res = linprog(c, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
# there should be nonzero crossover iterations for IPM (only)
assert_equal(res.crossover_nit == 0, self.method != "highs-ipm")
@pytest.mark.fail_slow(10)
def test_marginals(self):
# Ensure lagrange multipliers are correct by comparing the derivative
# w.r.t. b_ub/b_eq/ub/lb to the reported duals.
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=0)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
lb, ub = bounds.T
# sensitivity w.r.t. b_ub
def f_bub(x):
return linprog(c, A_ub, x, A_eq, b_eq, bounds,
method=self.method).fun
dfdbub = approx_derivative(f_bub, b_ub, method='3-point', f0=res.fun)
assert_allclose(res.ineqlin.marginals, dfdbub)
# sensitivity w.r.t. b_eq
def f_beq(x):
return linprog(c, A_ub, b_ub, A_eq, x, bounds,
method=self.method).fun
dfdbeq = approx_derivative(f_beq, b_eq, method='3-point', f0=res.fun)
assert_allclose(res.eqlin.marginals, dfdbeq)
# sensitivity w.r.t. lb
def f_lb(x):
bounds = np.array([x, ub]).T
return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method).fun
with np.errstate(invalid='ignore'):
# approx_derivative has trouble where lb is infinite
dfdlb = approx_derivative(f_lb, lb, method='3-point', f0=res.fun)
dfdlb[~np.isfinite(lb)] = 0
assert_allclose(res.lower.marginals, dfdlb)
# sensitivity w.r.t. ub
def f_ub(x):
bounds = np.array([lb, x]).T
return linprog(c, A_ub, b_ub, A_eq, b_eq, bounds,
method=self.method).fun
with np.errstate(invalid='ignore'):
dfdub = approx_derivative(f_ub, ub, method='3-point', f0=res.fun)
dfdub[~np.isfinite(ub)] = 0
assert_allclose(res.upper.marginals, dfdub)
def test_dual_feasibility(self):
# Ensure solution is dual feasible using marginals
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
# KKT dual feasibility equation from Theorem 1 from
# http://www.personal.psu.edu/cxg286/LPKKT.pdf
resid = (-c + A_ub.T @ res.ineqlin.marginals +
A_eq.T @ res.eqlin.marginals +
res.upper.marginals +
res.lower.marginals)
assert_allclose(resid, 0, atol=1e-12)
def test_complementary_slackness(self):
# Ensure that the complementary slackness condition is satisfied.
c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(seed=42)
res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
bounds=bounds, method=self.method, options=self.options)
# KKT complementary slackness equation from Theorem 1 from
# http://www.personal.psu.edu/cxg286/LPKKT.pdf modified for
# non-zero RHS
assert np.allclose(res.ineqlin.marginals @ (b_ub - A_ub @ res.x), 0)
@pytest.mark.xfail(reason='Upstream / Wrapper issue, see gh-20589')
def test_bug_20336(self):
"""
Test that `linprog` now solves a poorly-scaled problem
"""
boundaries = [(10000.0, 9010000.0), (0.0, None), (10000.0, None),
(0.0, 84.62623413258109), (10000.0, None), (10000.0, None),
(10000.0, None), (10000.0, None), (10000.0, None),
(10000.0, None), (10000.0, None), (10000.0, None),
(10000.0, None), (None, None), (None, None), (None, None),
(None, None), (None, None), (None, None), (None, None),
(None, None), (None, None), (None, None), (None, None),
(None, None), (None, None), (None, None), (None, None),
(None, None), (None, None), (None, None), (None, None),
(None, None)]
eq_entries = [-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0,
-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0, 1.0, -1.0, 0.001,
-0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0,
0.001, -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10,
1.0, -1.0, 0.001, -0.001, 3.7337777768059636e-10,
3.7337777768059636e-10, 1.0, -1.0, 0.001, -0.001,
3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, 0.001,
-0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0,
0.001, -0.001, 3.7337777768059636e-10, 3.7337777768059636e-10,
1.0, -1.0, 0.001, -0.001, 3.7337777768059636e-10,
3.7337777768059636e-10, 1.0, -1.0, 0.001, -0.001,
3.7337777768059636e-10, 3.7337777768059636e-10, 1.0, -1.0, 0.001,
-0.001, 3.7337777768059636e-10, 3.7337777768059636e-10, 1.0,
-1.0, 0.001, -0.001, 3.7337777768059636e-10,
3.7337777768059636e-10, 1.0, -1.0]
eq_indizes = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
11, 11, 12, 12, 12, 12, 13, 13, 14, 14, 14, 14, 15, 15, 16, 16,
16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21,
22, 22, 22, 22, 23, 23, 24, 24, 24, 24, 25, 25, 26, 26, 26, 26,
27, 27, 28, 28, 28, 28, 29, 29, 30, 30, 30, 30, 31, 31]
eq_vars = [15, 14, 17, 16, 19, 18, 21, 20, 23, 22, 25, 24, 27, 26, 29, 28, 31,
30, 13, 1, 0, 32, 3, 14, 13, 4, 0, 4, 0, 32, 31, 2, 12, 2, 12, 16,
15, 5, 4, 5, 4, 18, 17, 6, 5, 6, 5, 20, 19, 7, 6, 7, 6, 22, 21, 8,
7, 8, 7, 24, 23, 9, 8, 9, 8, 26, 25, 10, 9, 10, 9, 28, 27, 11, 10,
11, 10, 30, 29, 12, 11, 12, 11]
eq_values = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 9000000.0, 0.0,
0.006587392118285457, -5032.197406716549, 0.0041860502789104696,
-7918.93439542944, 0.0063205763583549035, -5244.625751707402,
0.006053760598424349, -5475.7793929428, 0.005786944838493795,
-5728.248403917573, 0.0055201290785632405, -6005.123623538355,
0.005253313318632687, -6310.123825488683, 0.004986497558702133,
-6647.763714796453, 0.004719681798771578, -7023.578908071522,
0.004452866038841024, -7444.431798646482]
coefficients = [0.0, 0.0, 0.0, -0.011816666666666668, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
np_eq_entries = np.asarray(eq_entries, dtype=np.float64)
np_eq_indizes = np.asarray(eq_indizes, dtype=np.int32)
np_eq_vars = np.asarray(eq_vars, dtype=np.int32)
a_eq= scipy.sparse.csr_array((np_eq_entries,(np_eq_indizes, np_eq_vars)),
shape=(32, 33))
b_eq = np.asarray(eq_values, dtype=np.float64)
c = np.asarray(coefficients, dtype=np.float64)
result = scipy.optimize.linprog(c, A_ub=None, b_ub=None, A_eq=a_eq, b_eq=b_eq,
bounds=boundaries)
assert result.status==0
x = result.x
n_r_x = np.linalg.norm(a_eq @ x - b_eq)
n_r = np.linalg.norm(result.eqlin.residual)
assert_allclose(n_r, n_r_x)
################################
# Simplex Option-Specific Tests#
################################
|
LinprogHiGHSTests
|
python
|
PrefectHQ__prefect
|
tests/cli/test_block.py
|
{
"start": 511,
"end": 638
}
|
class ____(Block):
message: str
"""
TEST_BLOCK_CODE_BAD_SYNTAX = """\
from prefect.blocks.core import Bloc
|
TestForFileRegister
|
python
|
walkccc__LeetCode
|
solutions/871. Minimum Number of Refueling Stops/871.py
|
{
"start": 0,
"end": 498
}
|
class ____:
def minRefuelStops(
self,
target: int,
startFuel: int,
stations: list[list[int]],
) -> int:
# dp[i] := the farthest position we can reach w / i refuels
dp = [startFuel] + [0] * len(stations)
for i, station in enumerate(stations):
for j in range(i + 1, 0, -1):
if dp[j - 1] >= station[0]:
dp[j] = max(dp[j], dp[j - 1] + station[1])
for i, d in enumerate(dp):
if d >= target:
return i
return -1
|
Solution
|
python
|
pytorch__pytorch
|
test/inductor/extension_backends/triton/extension_codegen_backend.py
|
{
"start": 991,
"end": 1392
}
|
class ____(DeviceOpOverrides):
def import_get_raw_stream_as(self, name: str) -> str:
return f"def {name}(name): None\n"
def set_device(self, device_idx: int) -> str: # noqa: ARG002 unused-argument
return ""
def synchronize(self) -> None:
pass
def device_guard(self, device_idx: int) -> str: # noqa: ARG002 unused-argument
return ""
|
CPUDeviceOpOverrides
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 27279,
"end": 27371
}
|
class ____(models.Model):
references = GenericRelation(ReferencedByGenRel)
|
GenRelReference
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airflow/dagster_airflow/resources/airflow_ephemeral_db.py
|
{
"start": 572,
"end": 3878
}
|
class ____(AirflowDatabase):
"""A ephemeral Airflow database Dagster resource."""
def __init__(
self, airflow_home_path: str, dagster_run: DagsterRun, dag_run_config: Optional[dict] = None
):
self.airflow_home_path = airflow_home_path
super().__init__(dagster_run=dagster_run, dag_run_config=dag_run_config)
@staticmethod
def _initialize_database(
airflow_home_path: str = os.path.join(tempfile.gettempdir(), "dagster_airflow"),
connections: list[Connection] = [],
):
os.environ["AIRFLOW_HOME"] = airflow_home_path
os.makedirs(airflow_home_path, exist_ok=True)
with Locker(airflow_home_path):
airflow_initialized = os.path.exists(f"{airflow_home_path}/airflow.db")
# because AIRFLOW_HOME has been overriden airflow needs to be reloaded
if is_airflow_2_loaded_in_environment():
importlib.reload(airflow.configuration)
importlib.reload(airflow.settings)
importlib.reload(airflow)
else:
importlib.reload(airflow)
if not airflow_initialized:
db.initdb()
create_airflow_connections(connections)
@staticmethod
def from_resource_context(context: InitResourceContext) -> "AirflowEphemeralDatabase":
airflow_home_path = os.path.join(tempfile.gettempdir(), f"dagster_airflow_{context.run_id}")
AirflowEphemeralDatabase._initialize_database(
airflow_home_path=airflow_home_path,
connections=[Connection(**c) for c in context.resource_config["connections"]],
)
return AirflowEphemeralDatabase(
airflow_home_path=airflow_home_path,
dagster_run=check.not_none(context.dagster_run, "Context must have run"),
dag_run_config=context.resource_config.get("dag_run_config"),
)
@superseded(
additional_warn_text=(
"`make_ephemeral_airflow_db_resource` has been superseded "
"by the functionality in the `dagster-airlift` library."
)
)
def make_ephemeral_airflow_db_resource(
connections: list[Connection] = [], dag_run_config: Optional[dict] = None
) -> ResourceDefinition:
"""Creates a Dagster resource that provides an ephemeral Airflow database.
Args:
connections (List[Connection]): List of Airflow Connections to be created in the Airflow DB
dag_run_config (Optional[dict]): dag_run configuration to be used when creating a DagRun
Returns:
ResourceDefinition: The ephemeral Airflow DB resource
"""
serialized_connections = serialize_connections(connections)
airflow_db_resource_def = ResourceDefinition(
resource_fn=AirflowEphemeralDatabase.from_resource_context,
config_schema={
"connections": Field(
Array(inner_type=dict),
default_value=serialized_connections,
is_required=False,
),
"dag_run_config": Field(
Noneable(dict),
default_value=dag_run_config,
is_required=False,
),
},
description="Ephemeral Airflow DB to be used by dagster-airflow ",
)
return airflow_db_resource_def
|
AirflowEphemeralDatabase
|
python
|
numba__numba
|
numba/tests/test_repr.py
|
{
"start": 445,
"end": 1656
}
|
class ____(TestCase):
def setUp(self) -> None:
tys_ns = {ty.__name__: ty for ty in NB_TYPES if hasattr(ty, "__name__")}
tys_ns.update({ty.name: ty for ty in NB_TYPES if hasattr(ty, "name")})
self.tys_ns = tys_ns
def check_repr(self, val):
ty = typeof(val)
ty2 = eval(repr(ty), self.tys_ns)
self.assertEqual(ty, ty2)
def test_types(self):
# define some values for the test cases
rec_dtype = [("a", "f8"), ("b", "U8"), ("c", "i8", (2, 3))]
nb_dict = Dict()
nb_dict['a'] = 1
# tests cases: list of different types + list comp of number types
val_types_cases = [
True,
"a",
(1, 2),
(1, "a"),
[1, "a"],
([1, "a"], [2, "b"]),
((1, 2), (3, "b")),
((1, 2), (3, [1, 2])),
np.ones(3),
np.array([(1, "a", np.ones((2, 3)))], dtype=rec_dtype),
nb_dict,
List([1, 2]),
{1, 2},
] + [number(1.1) for number in types.number_domain]
for val in val_types_cases:
self.check_repr(val)
if __name__ == '__main__':
unittest.main()
|
TestRepr
|
python
|
getsentry__sentry
|
src/sentry/models/groupshare.py
|
{
"start": 411,
"end": 1082
}
|
class ____(Model):
"""
A Group that was shared publicly.
"""
__relocation_scope__ = RelocationScope.Excluded
project = FlexibleForeignKey("sentry.Project")
group = FlexibleForeignKey("sentry.Group", unique=True)
uuid = models.CharField(max_length=32, unique=True, default=default_uuid)
# Tracking the user that initiated the share.
user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_groupshare"
__repr__ = sane_repr("project_id", "group_id", "uuid")
|
GroupShare
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/repeat_op_test.py
|
{
"start": 972,
"end": 1760
}
|
class ____(xla_test.XLATestCase):
def test(self):
# Verifies that bounded dynamic result generated from the Where op can be
# Reshaped correctly.
@def_function.function(jit_compile=True)
def repeat(values, repeats, axis):
return array_ops.repeat(values, repeats, axis)
with self.session() as sess:
with self.test_scope():
values = array_ops.constant([[1, 2], [3, 4]], dtype=dtypes.int32)
repeats = array_ops.constant([1, 2], dtype=dtypes.int32)
y1 = repeat(values, repeats, 0)
y2 = repeat(values, repeats, 1)
actual1, actual2 = sess.run([y1, y2])
self.assertAllEqual(actual1, [[1, 2], [3, 4], [3, 4]])
self.assertAllEqual(actual2, [[1, 2, 2], [3, 4, 4]])
if __name__ == "__main__":
test.main()
|
RepeatTest
|
python
|
pytorch__pytorch
|
torch/_inductor/utils.py
|
{
"start": 54573,
"end": 55056
}
|
class ____(DeferredLineBase):
"""At end of codegen call `line.replace(key, value_fn())`"""
def __init__(self, key: str, value_fn: Callable[[], str], line: str):
super().__init__(line)
self.key = key
self.value_fn = value_fn
def __call__(self) -> str:
return self.line.replace(self.key, self.value_fn())
def _new_line(self, line: str) -> DelayReplaceLine:
return DelayReplaceLine(self.key, self.value_fn, line)
|
DelayReplaceLine
|
python
|
django__django
|
django/db/migrations/serializer.py
|
{
"start": 8286,
"end": 8497
}
|
class ____(DeconstructibleSerializer):
def serialize(self):
attr_name, path, args, kwargs = self.value.deconstruct()
return self.serialize_deconstructed(path, args, kwargs)
|
ModelFieldSerializer
|
python
|
hynek__structlog
|
src/structlog/stdlib.py
|
{
"start": 27463,
"end": 31529
}
|
class ____:
"""
Add extra attributes of `logging.LogRecord` objects to the event
dictionary.
This processor can be used for adding data passed in the ``extra``
parameter of the `logging` module's log methods to the event dictionary.
Args:
allow:
An optional collection of attributes that, if present in
`logging.LogRecord` objects, will be copied to event dictionaries.
If ``allow`` is None all attributes of `logging.LogRecord` objects
that do not exist on a standard `logging.LogRecord` object will be
copied to event dictionaries.
.. versionadded:: 21.5.0
"""
__slots__ = ("_copier",)
def __init__(self, allow: Collection[str] | None = None) -> None:
self._copier: Callable[[EventDict, logging.LogRecord], None]
if allow is not None:
# The contents of allow is copied to a new list so that changes to
# the list passed into the constructor does not change the
# behaviour of this processor.
self._copier = functools.partial(self._copy_allowed, [*allow])
else:
self._copier = self._copy_all
def __call__(
self, logger: logging.Logger, name: str, event_dict: EventDict
) -> EventDict:
record: logging.LogRecord | None = event_dict.get("_record")
if record is not None:
self._copier(event_dict, record)
return event_dict
@classmethod
def _copy_all(
cls, event_dict: EventDict, record: logging.LogRecord
) -> None:
for key, value in record.__dict__.items():
if key not in _LOG_RECORD_KEYS:
event_dict[key] = value
@classmethod
def _copy_allowed(
cls,
allow: Collection[str],
event_dict: EventDict,
record: logging.LogRecord,
) -> None:
for key in allow:
if key in record.__dict__:
event_dict[key] = record.__dict__[key]
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")
def render_to_log_args_and_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
"""
Render ``event_dict`` into positional and keyword arguments for
`logging.Logger` logging methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is passed in the first positional argument, positional
arguments from ``positional_args`` field are passed in subsequent positional
arguments, keyword arguments are extracted from the *event_dict* and the
rest of the *event_dict* is added as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 25.1.0
"""
args = (event_dict.pop("event"), *event_dict.pop("positional_args", ()))
kwargs = {
kwarg_name: event_dict.pop(kwarg_name)
for kwarg_name in LOG_KWARG_NAMES
if kwarg_name in event_dict
}
if event_dict:
kwargs["extra"] = event_dict
return args, kwargs
def render_to_log_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
"""
Render ``event_dict`` into keyword arguments for `logging.Logger` logging
methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is translated into ``msg``, keyword arguments are
extracted from the *event_dict* and the rest of the *event_dict* is added as
``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 17.1.0
.. versionchanged:: 22.1.0
``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
kwargs and not put into ``extra``.
.. versionchanged:: 24.2.0
``stackLevel`` corrected to ``stacklevel``.
"""
return {
"msg": event_dict.pop("event"),
"extra": event_dict,
**{
kw: event_dict.pop(kw)
for kw in LOG_KWARG_NAMES
if kw in event_dict
},
}
|
ExtraAdder
|
python
|
walkccc__LeetCode
|
solutions/3420. Count Non-Decreasing Subarrays After K Operations/3420.py
|
{
"start": 0,
"end": 876
}
|
class ____:
def countNonDecreasingSubarrays(self, nums: list[int], k: int) -> int:
ans = 0
cost = 0
# Store (number, count) pairs in non-increasing order. The numbers in the
# queue represent what nums[i..j] look like after adjustments.
dq = collections.deque()
j = len(nums) - 1
for i, num in reversed(list(enumerate(nums))):
count = 1
while dq and dq[-1][0] < num:
nextNum, nextCount = dq.pop()
count += nextCount
cost += (num - nextNum) * nextCount # Adjust `nextNum`s to `num`.
dq.append((num, count))
while cost > k: # Remove the rightmost number.
rightmostNum, rightmostCount = dq.popleft()
cost -= (rightmostNum - nums[j])
j -= 1
if rightmostCount > 1:
dq.appendleft((rightmostNum, rightmostCount - 1))
ans += j - i + 1
return ans
|
Solution
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_socketlevel.py
|
{
"start": 84926,
"end": 86047
}
|
class ____(SocketDummyServerTestCase):
def test_pool_size_retry_drain_fail(self) -> None:
def socket_handler(listener: socket.socket) -> None:
for _ in range(2):
sock = listener.accept()[0]
while not sock.recv(65536).endswith(b"\r\n\r\n"):
pass
# send a response with an invalid content length -- this causes
# a ProtocolError to raise when trying to drain the connection
sock.send(
b"HTTP/1.1 404 NOT FOUND\r\n"
b"Content-Length: 1000\r\n"
b"Content-Type: text/plain\r\n"
b"\r\n"
)
sock.close()
self._start_server(socket_handler)
retries = Retry(total=1, raise_on_status=False, status_forcelist=[404])
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/not_found", preload_content=False)
assert pool.num_connections == 1
|
TestRetryPoolSizeDrainFail
|
python
|
apache__airflow
|
providers/exasol/src/airflow/providers/exasol/hooks/exasol.py
|
{
"start": 1385,
"end": 14768
}
|
class ____(DbApiHook):
"""
Interact with Exasol.
You can specify the pyexasol ``compression``, ``encryption``, ``json_lib``
and ``client_name`` parameters in the extra field of your connection
as ``{"compression": True, "json_lib": "rapidjson", etc}``.
See `pyexasol reference
<https://github.com/badoo/pyexasol/blob/master/docs/REFERENCE.md#connect>`_
for more details.
"""
conn_name_attr = "exasol_conn_id"
default_conn_name = "exasol_default"
conn_type = "exasol"
hook_name = "Exasol"
supports_autocommit = True
DEFAULT_SQLALCHEMY_SCHEME = "exa+websocket" # sqlalchemy-exasol dialect
def __init__(self, *args, sqlalchemy_scheme: str | None = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.schema = kwargs.pop("schema", None)
self._sqlalchemy_scheme = sqlalchemy_scheme
def get_conn(self) -> ExaConnection:
conn = self.get_connection(self.get_conn_id())
conn_args = {
"dsn": f"{conn.host}:{conn.port}",
"user": conn.login,
"password": conn.password,
"schema": self.schema or conn.schema,
}
# check for parameters in conn.extra
for arg_name, arg_val in conn.extra_dejson.items():
if arg_name in ["compression", "encryption", "json_lib", "client_name"]:
conn_args[arg_name] = arg_val
conn = pyexasol.connect(**conn_args)
return conn
@property
def sqlalchemy_scheme(self) -> str:
"""Sqlalchemy scheme either from constructor, connection extras or default."""
extra_scheme = self.connection is not None and self.connection_extra_lower.get("sqlalchemy_scheme")
sqlalchemy_scheme = self._sqlalchemy_scheme or extra_scheme or self.DEFAULT_SQLALCHEMY_SCHEME
if sqlalchemy_scheme not in ["exa+websocket", "exa+pyodbc", "exa+turbodbc"]:
raise ValueError(
f"sqlalchemy_scheme in connection extra should be one of 'exa+websocket', 'exa+pyodbc' or 'exa+turbodbc', "
f"but got '{sqlalchemy_scheme}'. See https://github.com/exasol/sqlalchemy-exasol?tab=readme-ov-file#using-sqlalchemy-with-exasol-db for more details."
)
return sqlalchemy_scheme
@property
def sqlalchemy_url(self) -> URL:
"""
Return a Sqlalchemy.engine.URL object from the connection.
:return: the extracted sqlalchemy.engine.URL object.
"""
connection = self.connection
query = connection.extra_dejson
query = {k: v for k, v in query.items() if k.lower() != "sqlalchemy_scheme"}
return URL.create(
drivername=self.sqlalchemy_scheme,
username=connection.login,
password=connection.password,
host=connection.host,
port=connection.port,
database=self.schema or connection.schema,
query=query,
)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
return self.sqlalchemy_url.render_as_string(hide_password=False)
def _get_pandas_df(
self, sql, parameters: Iterable | Mapping[str, Any] | None = None, **kwargs
) -> pd.DataFrame:
"""
Execute the SQL and return a Pandas dataframe.
:param sql: The sql statement to be executed (str) or a list of
sql statements to execute.
:param parameters: The parameters to render the SQL query with.
Other keyword arguments are all forwarded into
``pyexasol.ExaConnection.export_to_pandas``.
"""
with closing(self.get_conn()) as conn:
df = conn.export_to_pandas(sql, query_params=parameters, **kwargs)
return df
@deprecated(
    reason="Replaced by function `get_df`.",
    category=AirflowProviderDeprecationWarning,
    action="ignore",
)
def get_pandas_df(
    self,
    sql,
    parameters: list | tuple | Mapping[str, Any] | None = None,
    **kwargs,
) -> pd.DataFrame:
    """
    Deprecated wrapper around :meth:`_get_pandas_df`.

    :param sql: the sql statement to be executed (str) or a list of sql statements to execute
    :param parameters: The parameters to render the SQL query with.
    :param kwargs: (optional) passed into pandas.io.sql.read_sql method
    """
    return self._get_pandas_df(sql, parameters=parameters, **kwargs)
def _get_polars_df(
self,
sql,
parameters: list | tuple | Mapping[str, Any] | None = None,
**kwargs,
):
raise NotImplementedError("Polars is not supported for Exasol")
def get_records(
    self,
    sql: str | list[str],
    parameters: Iterable | Mapping[str, Any] | None = None,
) -> list[dict | tuple[Any, ...]]:
    """
    Execute *sql* and return every resulting row.

    :param sql: the SQL statement (str) or list of statements to execute.
    :param parameters: parameters to render the SQL query with.
    """
    with closing(self.get_conn()) as connection:
        with closing(connection.execute(sql, parameters)) as statement:
            return statement.fetchall()
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping[str, Any] | None = None) -> Any:
    """
    Execute *sql* and return only the first resulting row.

    :param sql: the SQL statement (str) or list of statements to execute.
    :param parameters: parameters to render the SQL query with.
    """
    with closing(self.get_conn()) as connection:
        with closing(connection.execute(sql, parameters)) as statement:
            return statement.fetchone()
def export_to_file(
    self,
    filename: str,
    query_or_table: str,
    query_params: dict | None = None,
    export_params: dict | None = None,
) -> None:
    """
    Export the result of a query (or a whole table) into a local file.

    :param filename: path of the destination file.
    :param query_or_table: the SQL statement to run, or a table name to export.
    :param query_params: query parameters forwarded to pyexasol's
        ``ExaConnection.export_to_file``.
    :param export_params: extra parameters forwarded to pyexasol's
        ``ExaConnection.export_to_file``.
    """
    self.log.info("Getting data from exasol")
    with closing(self.get_conn()) as connection:
        connection.export_to_file(
            dst=filename,
            query_or_table=query_or_table,
            query_params=query_params,
            export_params=export_params,
        )
    self.log.info("Data saved to %s", filename)
@staticmethod
def get_description(statement: ExaStatement) -> Sequence[Sequence]:
    """
    Build a DB-API 2.0 style ``description`` sequence for *statement*.

    For more info, see
    https://github.com/exasol/pyexasol/blob/master/docs/DBAPI_COMPAT.md#db-api-20-wrapper

    :param statement: Exasol statement
    :return: description sequence of 7-item column tuples
    """
    # pyexasol exposes column metadata as a name -> attributes mapping;
    # "size" intentionally fills both display_size and internal_size slots.
    return [
        (
            name,
            meta.get("type", None),
            meta.get("size", None),
            meta.get("size", None),
            meta.get("precision", None),
            meta.get("scale", None),
            True,
        )
        for name, meta in statement.columns().items()
    ]
@overload
def run(
    self,
    sql: str | Iterable[str],
    autocommit: bool = ...,
    parameters: Iterable | Mapping[str, Any] | None = ...,
    handler: None = ...,
    split_statements: bool = ...,
    return_last: bool = ...,
) -> None: ...

@overload
def run(
    self,
    sql: str | Iterable[str],
    autocommit: bool = ...,
    parameters: Iterable | Mapping[str, Any] | None = ...,
    handler: Callable[[Any], T] = ...,
    split_statements: bool = ...,
    return_last: bool = ...,
) -> tuple | list[tuple] | list[list[tuple] | tuple] | None: ...

def run(
    self,
    sql: str | Iterable[str],
    autocommit: bool = False,
    parameters: Iterable | Mapping[str, Any] | None = None,
    handler: Callable[[Any], T] | None = None,
    split_statements: bool = False,
    return_last: bool = True,
) -> tuple | list[tuple] | list[list[tuple] | tuple] | None:
    """
    Run a command or a list of commands.

    Pass a list of SQL statements to the SQL parameter to get them to
    execute sequentially.

    :param sql: the sql statement to be executed (str) or a list of
        sql statements to execute
    :param autocommit: What to set the connection's autocommit setting to
        before executing the query.
    :param parameters: The parameters to render the SQL query with.
    :param handler: The result handler which is called with the result of each statement.
    :param split_statements: Whether to split a single SQL string into statements and run separately
    :param return_last: Whether to return result for only last statement or for all after split
    :return: return only result of the LAST SQL expression if handler was provided.
    """
    # Reset per-call column metadata; one entry is appended per executed statement.
    self.descriptions = []
    if isinstance(sql, str):
        if split_statements:
            sql_list: Iterable[str] = self.split_sql_string(sql)
        else:
            # A blank/whitespace-only string yields an empty list, rejected below.
            statement = self.strip_sql_string(sql)
            sql_list = [statement] if statement.strip() else []
    else:
        sql_list = sql
    if sql_list:
        self.log.debug("Executing following statements against Exasol DB: %s", list(sql_list))
    else:
        raise ValueError("List of SQL statements is empty")
    _last_result = None
    with closing(self.get_conn()) as conn:
        self.set_autocommit(conn, autocommit)
        results = []
        for sql_statement in sql_list:
            self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
            with closing(conn.execute(sql_statement, parameters)) as exa_statement:
                if handler is not None:
                    result = self._make_common_data_structure(handler(exa_statement))
                    if return_single_query_results(sql, return_last, split_statements):
                        # _last_columns is only assigned here; the identical
                        # predicate guards its read below, so it is never
                        # referenced unassigned.
                        _last_result = result
                        _last_columns = self.get_description(exa_statement)
                    else:
                        results.append(result)
                        self.descriptions.append(self.get_description(exa_statement))
                self.log.info("Rows affected: %s", exa_statement.rowcount())
        # If autocommit was set to False or db does not support autocommit, we do a manual commit.
        if not self.get_autocommit(conn):
            conn.commit()
    if handler is None:
        return None
    if return_single_query_results(sql, return_last, split_statements):
        self.descriptions = [_last_columns]
        return _last_result
    return results
def set_autocommit(self, conn, autocommit: bool) -> None:
    """
    Set the autocommit flag on the connection.

    :param conn: connection to configure.
    :param autocommit: the autocommit setting to apply.
    """
    if autocommit and not self.supports_autocommit:
        # Warn, but still forward the requested setting to the driver.
        self.log.warning(
            "%s connection doesn't support autocommit but autocommit activated.",
            self.get_conn_id(),
        )
    conn.set_autocommit(autocommit)
def get_autocommit(self, conn) -> bool:
    """
    Get autocommit setting for the provided connection.

    :param conn: connection to inspect.
    :return: True if ``autocommit`` is set to True on the connection; when the
        attribute is absent, falls back to the base implementation.
    """
    autocommit = conn.attr.get("autocommit")
    if autocommit is not None:
        return autocommit
    return super().get_autocommit(conn)
@staticmethod
def _serialize_cell(cell, conn=None) -> Any:
"""
Override to disable cell serialization.
Exasol will adapt all arguments to the ``execute()`` method internally,
hence we return cell without any conversion.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
def exasol_fetch_all_handler(statement: ExaStatement) -> list[tuple] | None:
    """Return every row of *statement* when it produced a result set, else None."""
    if statement.result_type != "resultSet":
        return None
    return statement.fetchall()
def exasol_fetch_one_handler(statement: ExaStatement) -> list[tuple] | None:
    """Return the first row of *statement* when it produced a result set, else None."""
    if statement.result_type != "resultSet":
        return None
    return statement.fetchone()
|
ExasolHook
|
python
|
getsentry__sentry
|
tests/sentry/replays/endpoints/test_organization_replay_events_meta.py
|
{
"start": 392,
"end": 5944
}
|
class ____(APITestCase, SnubaTestCase, OccurrenceTestMixin):
    """Tests for the organization replay-events-meta endpoint.

    Stores events (or occurrences) via the Snuba test fixtures, queries the
    endpoint by event id, and asserts on the serialized metadata rows.
    """

    def setUp(self) -> None:
        super().setUp()
        # Fixed, sub-second-truncated timestamp so string comparisons are stable.
        self.min_ago = before_now(minutes=1).replace(microsecond=0)
        self.login_as(user=self.user)
        self.project_1 = self.create_project()
        self.project_2 = self.create_project()
        self.url = reverse(
            "sentry-api-0-organization-replay-events-meta",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        # Feature flag required by the endpoint.
        self.features = {"organizations:session-replay": True}

    def test_simple(self) -> None:
        """Only the two events matched by id are returned; extra events are filtered out."""
        event_id_a = "a" * 32
        event_id_b = "b" * 32
        min_ago_ms = self.min_ago + timedelta(milliseconds=123)
        event_a = self.store_event(
            data={
                "event_id": event_id_a,
                "timestamp": min_ago_ms.isoformat(),
            },
            project_id=self.project_1.id,
        )
        event_b = self.store_event(
            data={
                "event_id": event_id_b,
                "timestamp": min_ago_ms.isoformat(),
            },
            project_id=self.project_2.id,
        )
        # Two additional events that the id filter must exclude.
        self.store_event(
            data={
                "timestamp": min_ago_ms.isoformat(),
            },
            project_id=self.project_1.id,
        )
        self.store_event(
            data={
                "timestamp": min_ago_ms.isoformat(),
            },
            project_id=self.project_1.id,
        )
        query = {"query": f"id:[{event_id_a}, {event_id_b}]"}
        with self.feature(self.features):
            response = self.client.get(self.url, query, format="json")
        expected = [
            {
                "error.type": [],
                "id": event_id_a,
                "issue.id": event_a.group.id,
                "issue": event_a.group.qualified_short_id,
                "level": "error",
                "project.name": self.project_1.slug,
                "timestamp": min_ago_ms.isoformat(),
                "title": "<unlabeled event>",
            },
            {
                "error.type": [],
                "id": event_id_b,
                "issue.id": event_b.group.id,
                "issue": event_b.group.qualified_short_id,
                "level": "error",
                "project.name": self.project_2.slug,
                "timestamp": min_ago_ms.isoformat(),
                "title": "<unlabeled event>",
            },
        ]
        assert response.status_code == 200, response.content
        assert sorted(response.data["data"], key=lambda v: v["id"]) == expected

    def test_rage_clicks(self) -> None:
        """Rage-click occurrences are served through the issuePlatform dataset."""
        event_id_a = "a" * 32
        min_ago_ms = self.min_ago + timedelta(milliseconds=123)
        _, group_info = self.process_occurrence(
            **{
                "project_id": self.project.id,
                "event_id": event_id_a,
                "fingerprint": ["c" * 32],
                "issue_title": "Rage Click",
                "type": ReplayRageClickType.type_id,
                "detection_time": min_ago_ms.timestamp(),
                "level": "info",
            },
            event_data={
                "platform": "javascript",
                "timestamp": min_ago_ms.isoformat(),
                "received": min_ago_ms.isoformat(),
            },
        )
        query = {"query": f"id:[{event_id_a}]", "dataset": "issuePlatform"}
        with self.feature(self.features):
            response = self.client.get(self.url, query, format="json")
        assert group_info is not None
        expected = [
            {
                "error.type": "",
                "id": event_id_a,
                "issue.id": group_info.group.id,
                "issue": group_info.group.qualified_short_id,
                "level": "error",
                "project.name": self.project.slug,
                "timestamp": min_ago_ms.isoformat(),
                "title": "Rage Click",
            }
        ]
        assert response.status_code == 200, response.content
        assert sorted(response.data["data"], key=lambda v: v["id"]) == expected

    def test_timestamp_ms_none(self) -> None:
        """
        Test handling of null timestamp_ms values in events.

        timestamp_ms is a new property added to events, but old events in the database
        don't have this field populated (it is null). Over time this will no longer be
        an issue as new events always include timestamp_ms, but for now we need to handle
        the case where timestamp_ms is null. This test mocks a Snuba response to verify
        we handle null timestamp_ms values correctly, simply keeping the timestamp as is.
        """
        # Craft the fake Snuba response
        fake_snuba_data = {
            "data": [
                {
                    "id": "abc123",
                    "timestamp": self.min_ago.isoformat(),
                    "timestamp_ms": None,
                }
            ]
        }
        # Patch the discover.query function used by the endpoint
        with patch("sentry.snuba.discover.query", return_value=fake_snuba_data):
            query = {"query": "id:[abc123]"}
            with self.feature(self.features):
                response = self.client.get(self.url, query, format="json")
        # Now assert on the response as usual
        assert response.status_code == 200
        assert "timestamp_ms" not in response.data["data"][0]
        assert response.data["data"][0]["timestamp"] == self.min_ago.isoformat()
|
OrganizationEventsMetaTest
|
python
|
spack__spack
|
lib/spack/spack/relocate_text.py
|
{
"start": 11145,
"end": 11357
}
|
class ____(BinaryTextReplaceError):
    """Raised when *old* cannot be replaced because *new* is longer."""

    def __init__(self, old, new):
        # ``__init__`` must not return a value; previously this was written as
        # ``return super().__init__(...)`` which only worked because
        # ``__init__`` happens to return None. Delegate for message
        # construction only.
        super().__init__(
            f"Cannot replace {old!r} with {new!r} because the new prefix is longer."
        )
|
CannotGrowString
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py
|
{
"start": 269,
"end": 4525
}
|
class ____(BaseImagePreprocessingLayer):
    """Preprocessing layer for random conversion of RGB images to grayscale.

    This layer randomly converts input images to grayscale with a specified
    factor. When applied, it maintains the original number of channels
    but sets all channels to the same grayscale value. This can be useful
    for data augmentation and training models to be robust to color
    variations.

    The conversion preserves the perceived luminance of the original color
    image using standard RGB to grayscale conversion coefficients. Images
    that are not selected for conversion remain unchanged.

    **Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
    (independently of which backend you're using).

    Args:
        factor: Float between 0 and 1, specifying the factor of
            converting each image to grayscale. Defaults to 0.5. A value of
            1.0 means all images will be converted, while 0.0 means no images
            will be converted.
        data_format: String, one of `"channels_last"` (default) or
            `"channels_first"`. The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch, channels, height, width)`.

    Input shape:
        3D (unbatched) or 4D (batched) tensor with shape:
        `(..., height, width, channels)`, in `"channels_last"` format,
        or `(..., channels, height, width)`, in `"channels_first"` format.

    Output shape:
        Same as input shape. The output maintains the same number of channels
        as the input, even for grayscale-converted images where all channels
        will have the same value.
    """

    def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        # Validate early: an out-of-range factor would silently skew sampling.
        if factor < 0 or factor > 1:
            raise ValueError(
                f"`factor` should be between 0 and 1. Received: factor={factor}"
            )
        self.factor = factor
        self.data_format = backend.standardize_data_format(data_format)
        self.seed = seed
        self.generator = self.backend.random.SeedGenerator(seed)

    def get_random_transformation(self, images, training=True, seed=None):
        """Sample one uniform value per image; True means convert that image."""
        if seed is None:
            seed = self._get_seed_generator(self.backend._backend)
        # Base case: Unbatched data
        batch_size = 1
        if len(images.shape) == 4:
            # This is a batch of images (4D input)
            batch_size = self.backend.core.shape(images)[0]
        random_values = self.backend.random.uniform(
            shape=(batch_size,),
            minval=0,
            maxval=1,
            seed=seed,
        )
        # Expand the (batch,) boolean mask so it broadcasts against the image
        # tensor in `transform_images`.
        # NOTE(review): for unbatched 3D inputs this produces a (1, 1, 1, 1)
        # mask, which would broadcast a 3D image up to 4D in `where` —
        # confirm unbatched inputs are expected here.
        should_apply = self.backend.numpy.expand_dims(
            random_values < self.factor, axis=[1, 2, 3]
        )
        return should_apply

    def transform_images(self, images, transformation, training=True):
        """Convert the selected images to grayscale; pass the rest through unchanged."""
        if training:
            should_apply = (
                transformation
                if transformation is not None
                else self.get_random_transformation(images)
            )
            grayscale_images = self.backend.image.rgb_to_grayscale(
                images, data_format=self.data_format
            )
            # Per-image select between the grayscale and original pixels.
            return self.backend.numpy.where(
                should_apply, grayscale_images, images
            )
        return images

    def compute_output_shape(self, input_shape):
        # Grayscale conversion keeps the channel count, so shape is unchanged.
        return input_shape

    def compute_output_spec(self, inputs, **kwargs):
        return backend.KerasTensor(
            inputs.shape, dtype=inputs.dtype, sparse=inputs.sparse
        )

    def transform_bounding_boxes(self, bounding_boxes, **kwargs):
        # Color-space change does not move geometry; boxes pass through.
        return bounding_boxes

    def transform_labels(self, labels, transformations=None, **kwargs):
        return labels

    def transform_segmentation_masks(
        self, segmentation_masks, transformations=None, **kwargs
    ):
        return segmentation_masks

    def get_config(self):
        config = super().get_config()
        config.update({"factor": self.factor})
        return config
|
RandomGrayscale
|
python
|
huggingface__transformers
|
src/transformers/models/rt_detr/image_processing_rt_detr_fast.py
|
{
"start": 3787,
"end": 22244
}
|
class ____(BaseImageProcessorFast):
    """Fast, tensor-based image processor for RT-DETR.

    Defaults (class attributes below): resize to 640x640 without keeping the
    aspect ratio, rescale, no normalization, no padding; annotations in COCO
    detection format.
    """

    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    format = AnnotationFormat.COCO_DETECTION
    do_resize = True
    do_rescale = True
    do_normalize = False
    do_pad = False
    size = {"height": 640, "width": 640}
    default_to_square = False
    model_input_names = ["pixel_values", "pixel_mask"]
    valid_kwargs = RTDetrImageProcessorKwargs
    do_convert_annotations = True

    def __init__(self, **kwargs: Unpack[RTDetrImageProcessorKwargs]) -> None:
        # Backwards compatibility: `do_convert_annotations` historically
        # mirrored `do_normalize` when not given explicitly.
        do_convert_annotations = kwargs.get("do_convert_annotations")
        do_normalize = kwargs.get("do_normalize")
        if do_convert_annotations is None and getattr(self, "do_convert_annotations", None) is None:
            self.do_convert_annotations = do_normalize if do_normalize is not None else self.do_normalize
        super().__init__(**kwargs)

    def prepare_annotation(
        self,
        image: torch.Tensor,
        target: dict,
        format: Optional[AnnotationFormat] = None,
        return_segmentation_masks: Optional[bool] = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> dict:
        """
        Prepare an annotation for feeding into RT_DETR model.

        Only COCO detection format is supported; any other format raises.
        """
        format = format if format is not None else self.format
        if format == AnnotationFormat.COCO_DETECTION:
            return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_detection_annotation(
                image, target, return_segmentation_masks, input_data_format=input_data_format
            )
        else:
            raise ValueError(f"Format {format} is not supported.")
        return target

    def resize(
        self,
        image: torch.Tensor,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"] = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
        int, smaller edge of the image will be matched to this number.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Size of the image's `(height, width)` dimensions after resizing. Available options are:
                    - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
                        Do NOT keep the aspect ratio.
                    - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
                        the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
                        less or equal to `longest_edge`.
                    - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
                        aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
                        `max_width`.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                Resampling filter to use if resizing the image.
        """
        interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
        if size.shortest_edge and size.longest_edge:
            # Resize the image so that the shortest edge or the longest edge is of the given size
            # while maintaining the aspect ratio of the original image.
            new_size = get_size_with_aspect_ratio(
                image.size()[-2:],
                size["shortest_edge"],
                size["longest_edge"],
            )
        elif size.max_height and size.max_width:
            new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"])
        elif size.height and size.width:
            new_size = (size["height"], size["width"])
        else:
            raise ValueError(
                "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
                f" {size.keys()}."
            )
        image = F.resize(
            image,
            size=new_size,
            interpolation=interpolation,
            **kwargs,
        )
        return image

    def resize_annotation(
        self,
        annotation: dict[str, Any],
        orig_size: tuple[int, int],
        target_size: tuple[int, int],
        threshold: float = 0.5,
        interpolation: Optional["F.InterpolationMode"] = None,
    ):
        """
        Resizes an annotation to a target size.

        Args:
            annotation (`dict[str, Any]`):
                The annotation dictionary.
            orig_size (`tuple[int, int]`):
                The original size of the input image.
            target_size (`tuple[int, int]`):
                The target size of the image, as returned by the preprocessing `resize` step.
            threshold (`float`, *optional*, defaults to 0.5):
                The threshold used to binarize the segmentation masks.
            resample (`InterpolationMode`, defaults to `F.InterpolationMode.NEAREST_EXACT`):
                The resampling filter to use when resizing the masks.
        """
        interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST_EXACT
        ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]
        new_annotation = {}
        new_annotation["size"] = target_size
        for key, value in annotation.items():
            if key == "boxes":
                # Boxes scale linearly with each axis.
                boxes = value
                scaled_boxes = boxes * torch.as_tensor(
                    [ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device
                )
                new_annotation["boxes"] = scaled_boxes
            elif key == "area":
                # Area scales with the product of both axis ratios.
                area = value
                scaled_area = area * (ratio_width * ratio_height)
                new_annotation["area"] = scaled_area
            elif key == "masks":
                # Resize each mask then re-binarize against `threshold`.
                masks = value[:, None]
                masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]
                masks = torch.stack(masks).to(torch.float32)
                masks = masks[:, 0] > threshold
                new_annotation["masks"] = masks
            elif key == "size":
                new_annotation["size"] = target_size
            else:
                new_annotation[key] = value
        return new_annotation

    def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
        """Convert boxes to center format and normalize them to [0, 1] by image size."""
        image_height, image_width = image_size
        norm_annotation = {}
        for key, value in annotation.items():
            if key == "boxes":
                boxes = value
                boxes = corners_to_center_format(boxes)
                boxes /= torch.as_tensor(
                    [image_width, image_height, image_width, image_height], dtype=torch.float32, device=boxes.device
                )
                norm_annotation[key] = boxes
            else:
                norm_annotation[key] = value
        return norm_annotation

    def _update_annotation_for_padded_image(
        self,
        annotation: dict,
        input_image_size: tuple[int, int],
        output_image_size: tuple[int, int],
        padding,
        update_bboxes,
    ) -> dict:
        """
        Update the annotation for a padded image.
        """
        new_annotation = {}
        new_annotation["size"] = output_image_size
        # Note the swapped unpacking: each ratio is input/output (shrink factor
        # for normalized boxes on the padded canvas).
        ratio_height, ratio_width = (input / output for output, input in zip(output_image_size, input_image_size))
        for key, value in annotation.items():
            if key == "masks":
                masks = value
                masks = F.pad(
                    masks,
                    padding,
                    fill=0,
                )
                masks = safe_squeeze(masks, 1)
                new_annotation["masks"] = masks
            elif key == "boxes" and update_bboxes:
                boxes = value
                boxes *= torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], device=boxes.device)
                new_annotation["boxes"] = boxes
            elif key == "size":
                new_annotation["size"] = output_image_size
            else:
                new_annotation[key] = value
        return new_annotation

    def pad(
        self,
        image: torch.Tensor,
        padded_size: tuple[int, int],
        annotation: Optional[dict[str, Any]] = None,
        update_bboxes: bool = True,
    ):
        """Pad `image` bottom/right to `padded_size`; returns (image, pixel_mask, annotation)."""
        original_size = image.size()[-2:]
        padding_bottom = padded_size[0] - original_size[0]
        padding_right = padded_size[1] - original_size[1]
        if padding_bottom < 0 or padding_right < 0:
            raise ValueError(
                f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
                f"original size. Got padded size: {padded_size}, original size: {original_size}."
            )
        if original_size != padded_size:
            padding = [0, 0, padding_right, padding_bottom]
            image = F.pad(image, padding, fill=fill)
            if annotation is not None:
                annotation = self._update_annotation_for_padded_image(
                    annotation, original_size, padded_size, padding, update_bboxes
                )
        # Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
        pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device)
        pixel_mask[: original_size[0], : original_size[1]] = 1
        return image, pixel_mask, annotation

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        annotations: Optional[Union[AnnotationType, list[AnnotationType]]],
        masks_path: Optional[Union[str, pathlib.Path]],
        return_segmentation_masks: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        do_convert_annotations: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        do_pad: bool,
        pad_size: Optional[SizeDict],
        format: Optional[Union[str, AnnotationFormat]],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.
        """
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )
        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

        data = {}
        processed_images = []
        processed_annotations = []
        pixel_masks = []  # Initialize pixel_masks here
        # First pass: per-image annotation prep, resize, rescale/normalize.
        for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
            # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
            if annotations is not None:
                annotation = self.prepare_annotation(
                    image,
                    annotation,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=ChannelDimension.FIRST,
                )
            if do_resize:
                resized_image = self.resize(image, size=size, interpolation=interpolation)
                if annotations is not None:
                    annotation = self.resize_annotation(
                        annotation,
                        orig_size=image.size()[-2:],
                        target_size=resized_image.size()[-2:],
                    )
                image = resized_image
            # Fused rescale and normalize
            image = self.rescale_and_normalize(image, do_rescale, rescale_factor, do_normalize, image_mean, image_std)
            if do_convert_annotations and annotations is not None:
                annotation = self.normalize_annotation(annotation, get_image_size(image, ChannelDimension.FIRST))
            processed_images.append(image)
            processed_annotations.append(annotation)
        images = processed_images
        annotations = processed_annotations if annotations is not None else None

        if do_pad:
            # depends on all resized image shapes so we need another loop
            if pad_size is not None:
                padded_size = (pad_size.height, pad_size.width)
            else:
                padded_size = get_max_height_width(images)

            padded_images = []
            padded_annotations = []
            for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
                # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
                if padded_size == image.size()[-2:]:
                    # Already at target size: all-ones mask, no padding needed.
                    padded_images.append(image)
                    pixel_masks.append(torch.ones(padded_size, dtype=torch.int64, device=image.device))
                    padded_annotations.append(annotation)
                    continue
                image, pixel_mask, annotation = self.pad(
                    image, padded_size, annotation=annotation, update_bboxes=do_convert_annotations
                )
                padded_images.append(image)
                padded_annotations.append(annotation)
                pixel_masks.append(pixel_mask)
            images = padded_images
            annotations = padded_annotations if annotations is not None else None
            data.update({"pixel_mask": torch.stack(pixel_masks, dim=0)})

        data.update({"pixel_values": torch.stack(images, dim=0)})
        encoded_inputs = BatchFeature(data, tensor_type=return_tensors)
        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
            ]
        return encoded_inputs

    def post_process_object_detection(
        self,
        outputs,
        threshold: float = 0.5,
        target_sizes: Union[TensorType, list[tuple]] = None,
        use_focal_loss: bool = True,
    ):
        """
        Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
        bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`DetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
                `(height, width)` of each image in the batch. If unset, predictions will not be resized.
            use_focal_loss (`bool` defaults to `True`):
                Variable informing if the focal loss was used to predict the outputs. If `True`, a sigmoid is applied
                to compute the scores of each detection, otherwise, a softmax function is used.

        Returns:
            `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        requires_backends(self, ["torch"])
        out_logits, out_bbox = outputs.logits, outputs.pred_boxes
        # convert from relative cxcywh to absolute xyxy
        boxes = center_to_corners_format(out_bbox)
        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if isinstance(target_sizes, list):
                img_h, img_w = torch.as_tensor(target_sizes).unbind(1)
            else:
                img_h, img_w = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]

        num_top_queries = out_logits.shape[1]
        num_classes = out_logits.shape[2]
        if use_focal_loss:
            scores = torch.nn.functional.sigmoid(out_logits)
            # Flattened top-k over (query, class); recover label and query index
            # from the flat index below.
            # NOTE(review): `axis=-1` relies on torch's numpy-compat alias for
            # `dim=` — confirm intended.
            scores, index = torch.topk(scores.flatten(1), num_top_queries, axis=-1)
            labels = index % num_classes
            index = index // num_classes
            boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1]))
        else:
            # Softmax path: drop the last ("no object") class before the max.
            scores = torch.nn.functional.softmax(out_logits)[:, :, :-1]
            scores, labels = scores.max(dim=-1)
            if scores.shape[1] > num_top_queries:
                scores, index = torch.topk(scores, num_top_queries, dim=-1)
                labels = torch.gather(labels, dim=1, index=index)
                boxes = torch.gather(boxes, dim=1, index=index.unsqueeze(-1).tile(1, 1, boxes.shape[-1]))

        results = []
        for score, label, box in zip(scores, labels, boxes):
            results.append(
                {
                    "scores": score[score > threshold],
                    "labels": label[score > threshold],
                    "boxes": box[score > threshold],
                }
            )
        return results
__all__ = ["RTDetrImageProcessorFast"]
|
RTDetrImageProcessorFast
|
python
|
huggingface__transformers
|
src/transformers/models/superpoint/image_processing_superpoint.py
|
{
"start": 1474,
"end": 3760
}
|
class ____(ImagesKwargs, total=False):
    r"""
    do_grayscale (`bool`, *optional*, defaults to `True`):
        Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
    """

    # Optional flag (total=False): whether to grayscale-convert inputs.
    do_grayscale: bool
def is_grayscale(
    image: np.ndarray,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
    """Return whether *image* is grayscale: a single channel, or three identical channels.

    NOTE(review): when ``input_data_format`` is neither ``ChannelDimension.FIRST``
    nor ``ChannelDimension.LAST`` this falls through and implicitly returns
    ``None`` (falsy) — confirm this is intended.
    """
    if input_data_format == ChannelDimension.FIRST:
        if image.shape[0] == 1:
            return True
        return np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])
    if input_data_format == ChannelDimension.LAST:
        if image.shape[-1] == 1:
            return True
        return np.all(image[..., 0] == image[..., 1]) and np.all(image[..., 1] == image[..., 2])
def convert_to_grayscale(
    image: ImageInput,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> ImageInput:
    """
    Converts an image to grayscale format using the NTSC formula. Only support numpy and PIL Image.

    This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
    channel, because of an issue that is discussed in :
    https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446

    Note: PIL inputs are converted via ``Image.convert("L")`` and therefore come
    back as a single-channel image, unlike the 3-channel numpy path. Inputs that
    are neither numpy arrays nor PIL images are returned unchanged.

    Args:
        image (Image):
            The image to convert.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image.
    """
    requires_backends(convert_to_grayscale, ["vision"])

    if isinstance(image, np.ndarray):
        # Already grayscale: return as-is to avoid a useless weighted sum.
        if is_grayscale(image, input_data_format=input_data_format):
            return image
        # NTSC luma weights: 0.2989 R + 0.5870 G + 0.1140 B.
        if input_data_format == ChannelDimension.FIRST:
            gray_image = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
            gray_image = np.stack([gray_image] * 3, axis=0)
        elif input_data_format == ChannelDimension.LAST:
            gray_image = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
            gray_image = np.stack([gray_image] * 3, axis=-1)
        return gray_image

    if not isinstance(image, PIL.Image.Image):
        return image

    image = image.convert("L")
    return image
|
SuperPointImageProcessorKwargs
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/sorting.py
|
{
"start": 3756,
"end": 4443
}
|
class ____(AutoEnum):
"""Defines flow sorting options."""
CREATED_DESC = AutoEnum.auto()
UPDATED_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort task runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"CREATED_DESC": [db.Flow.created.desc()],
"UPDATED_DESC": [db.Flow.updated.desc()],
"NAME_ASC": [db.Flow.name.asc()],
"NAME_DESC": [db.Flow.name.desc()],
}
return sort_mapping[self.value]
|
FlowSort
|
python
|
scipy__scipy
|
scipy/sparse/_csc.py
|
{
"start": 5322,
"end": 8232
}
|
class ____(_csc_base, sparray):
"""
Compressed Sparse Column array.
This can be instantiated in several ways:
csc_array(D)
where D is a 2-D ndarray
csc_array(S)
with another sparse array or matrix S (equivalent to S.tocsc())
csc_array((M, N), [dtype])
to construct an empty array with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csc_array((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csc_array((data, indices, indptr), [shape=(M, N)])
is the standard CSC representation where the row indices for
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding values are stored in
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
not supplied, the array dimensions are inferred from
the index arrays.
Attributes
----------
dtype : dtype
Data type of the array
shape : 2-tuple
Shape of the array
ndim : int
Number of dimensions (this is always 2)
nnz
size
data
CSC format data array of the array
indices
CSC format index array of the array
indptr
CSC format index pointer array of the array
has_sorted_indices
has_canonical_format
T
Notes
-----
Sparse arrays can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSC format
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
- efficient column slicing
- fast matrix vector products (CSR, BSR may be faster)
Disadvantages of the CSC format
- slow row slicing operations (consider CSR)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Canonical format
- Within each column, indices are sorted by row.
- There are no duplicate entries.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> csc_array((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_array((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_array((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
"""
|
csc_array
|
python
|
gevent__gevent
|
src/gevent/libuv/watcher.py
|
{
"start": 28968,
"end": 29069
}
|
class ____(_base.PrepareMixin, watcher):
_watcher_callback_name = '_gevent_prepare_callback0'
|
prepare
|
python
|
scikit-learn__scikit-learn
|
sklearn/tests/test_metaestimators.py
|
{
"start": 1113,
"end": 11585
}
|
class ____:
def __init__(
self,
name,
construct,
skip_methods=(),
fit_args=make_classification(random_state=0),
):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
# For the following meta estimators we check for the existence of relevant
# methods only if the sub estimator also contains them. Any methods that
# are implemented in the meta estimator themselves and are not dependent
# on the sub estimator are specified in the `skip_methods` parameter.
DELEGATING_METAESTIMATORS = [
DelegatorData("Pipeline", lambda est: Pipeline([("est", est)])),
DelegatorData(
"GridSearchCV",
lambda est: GridSearchCV(est, param_grid={"param": [5]}, cv=2),
skip_methods=["score"],
),
DelegatorData(
"RandomizedSearchCV",
lambda est: RandomizedSearchCV(
est, param_distributions={"param": [5]}, cv=2, n_iter=1
),
skip_methods=["score"],
),
DelegatorData("RFE", RFE, skip_methods=["transform", "inverse_transform"]),
DelegatorData(
"RFECV", RFECV, skip_methods=["transform", "inverse_transform", "score"]
),
DelegatorData(
"BaggingClassifier",
BaggingClassifier,
skip_methods=[
"transform",
"inverse_transform",
"score",
"predict_proba",
"predict_log_proba",
"predict",
],
),
DelegatorData(
"SelfTrainingClassifier",
lambda est: SelfTrainingClassifier(est),
skip_methods=["transform", "inverse_transform", "predict_proba"],
),
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError("%r is hidden" % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
self.classes_ = []
return True
def _check_fit(self):
check_is_fitted(self)
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, y, *args, **kwargs):
self._check_fit()
return 1.0
methods = [
k
for k in SubEstimator.__dict__.keys()
if not k.startswith("_") and not k.startswith("fit")
]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert hasattr(delegate, method)
assert hasattr(delegator, method), (
"%s does not have method %r when its delegate does"
% (
delegator_data.name,
method,
)
)
# delegation before fit raises a NotFittedError
if method == "score":
with pytest.raises(NotFittedError):
getattr(delegator, method)(
delegator_data.fit_args[0], delegator_data.fit_args[1]
)
else:
with pytest.raises(NotFittedError):
getattr(delegator, method)(delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
if method == "score":
getattr(delegator, method)(
delegator_data.fit_args[0], delegator_data.fit_args[1]
)
else:
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert not hasattr(delegate, method)
assert not hasattr(delegator, method), (
"%s has method %r when its delegate does not"
% (
delegator_data.name,
method,
)
)
def _get_instance_with_pipeline(meta_estimator, init_params):
"""Given a single meta-estimator instance, generate an instance with a pipeline"""
if {"estimator", "base_estimator", "regressor"} & init_params:
if is_regressor(meta_estimator):
estimator = make_pipeline(TfidfVectorizer(), Ridge())
param_grid = {"ridge__alpha": [0.1, 1.0]}
else:
estimator = make_pipeline(TfidfVectorizer(), LogisticRegression())
param_grid = {"logisticregression__C": [0.1, 1.0]}
if init_params.intersection(
{"param_grid", "param_distributions"}
): # SearchCV estimators
extra_params = {"n_iter": 2} if "n_iter" in init_params else {}
return type(meta_estimator)(estimator, param_grid, **extra_params)
else:
return type(meta_estimator)(estimator)
if "transformer_list" in init_params:
# FeatureUnion
transformer_list = [
("trans1", make_pipeline(TfidfVectorizer(), MaxAbsScaler())),
(
"trans2",
make_pipeline(TfidfVectorizer(), StandardScaler(with_mean=False)),
),
]
return type(meta_estimator)(transformer_list)
if "estimators" in init_params:
# stacking, voting
if is_regressor(meta_estimator):
estimator = [
("est1", make_pipeline(TfidfVectorizer(), Ridge(alpha=0.1))),
("est2", make_pipeline(TfidfVectorizer(), Ridge(alpha=1))),
]
else:
estimator = [
(
"est1",
make_pipeline(TfidfVectorizer(), LogisticRegression(C=0.1)),
),
("est2", make_pipeline(TfidfVectorizer(), LogisticRegression(C=1))),
]
return type(meta_estimator)(estimator)
def _generate_meta_estimator_instances_with_pipeline():
"""Generate instances of meta-estimators fed with a pipeline
Are considered meta-estimators all estimators accepting one of "estimator",
"base_estimator" or "estimators".
"""
print("estimators: ", len(all_estimators()))
for _, Estimator in sorted(all_estimators()):
sig = set(signature(Estimator).parameters)
print("\n", Estimator.__name__, sig)
if not sig.intersection(
{
"estimator",
"base_estimator",
"regressor",
"transformer_list",
"estimators",
}
):
continue
with suppress(SkipTest):
for meta_estimator in _construct_instances(Estimator):
print(meta_estimator)
yield _get_instance_with_pipeline(meta_estimator, sig)
# TODO: remove data validation for the following estimators
# They should be able to work on any data and delegate data validation to
# their inner estimator(s).
DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE = [
"AdaBoostClassifier",
"AdaBoostRegressor",
"BaggingClassifier",
"BaggingRegressor",
"ClassifierChain", # data validation is necessary
"FrozenEstimator", # this estimator cannot be tested like others.
"IterativeImputer",
"OneVsOneClassifier", # input validation can't be avoided
"RANSACRegressor",
"RFE",
"RFECV",
"RegressorChain", # data validation is necessary
"SelfTrainingClassifier",
"SequentialFeatureSelector", # not applicable (2D data mandatory)
]
DATA_VALIDATION_META_ESTIMATORS = [
est
for est in _generate_meta_estimator_instances_with_pipeline()
if est.__class__.__name__ not in DATA_VALIDATION_META_ESTIMATORS_TO_IGNORE
]
def _get_meta_estimator_id(estimator):
return estimator.__class__.__name__
@pytest.mark.parametrize(
"estimator", DATA_VALIDATION_META_ESTIMATORS, ids=_get_meta_estimator_id
)
def test_meta_estimators_delegate_data_validation(estimator):
# Check that meta-estimators delegate data validation to the inner
# estimator(s).
# clone to avoid side effects and ensure thread-safe test execution.
estimator = clone(estimator)
rng = np.random.RandomState(0)
set_random_state(estimator)
n_samples = 30
X = rng.choice(np.array(["aa", "bb", "cc"], dtype=object), size=n_samples)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(3, size=n_samples)
# We convert to lists to make sure it works on array-like
X = _enforce_estimator_tags_X(estimator, X).tolist()
y = _enforce_estimator_tags_y(estimator, y).tolist()
# Calling fit should not raise any data validation exception since X is a
# valid input datastructure for the first step of the pipeline passed as
# base estimator to the meta estimator.
estimator.fit(X, y)
# n_features_in_ should not be defined since data is not tabular data.
assert not hasattr(estimator, "n_features_in_")
|
DelegatorData
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/feishu/provider.py
|
{
"start": 219,
"end": 346
}
|
class ____(ProviderAccount):
def get_avatar_url(self):
return self.account.extra_data.get("avatar_big")
|
FeishuAccount
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_text.py
|
{
"start": 30312,
"end": 34601
}
|
class ____(Data2VecTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.decoder.weight": "data2vec_text.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `Data2VecTextLMHeadModel` as a standalone, add `is_decoder=True.`")
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.lm_head = Data2VecTextLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, Data2VecTextForCausalLM, Data2VecTextConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
>>> config = Data2VecTextConfig.from_pretrained("facebook/data2vec-text-base")
>>> config.is_decoder = True
>>> model = Data2VecTextForCausalLM.from_pretrained("facebook/data2vec-text-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPoolingAndCrossAttentions = self.data2vec_text(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring
|
Data2VecTextForCausalLM
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_crop_test.py
|
{
"start": 150,
"end": 5533
}
|
class ____(testing.TestCase):
def test_random_crop(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 2,
"width": 2,
"data_format": "channels_last",
},
input_shape=(1, 3, 4, 3),
supports_masking=False,
run_training_check=False,
expected_output_shape=(1, 2, 2, 3),
)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 2,
"width": 2,
"data_format": "channels_last",
},
input_shape=(3, 4, 3),
supports_masking=False,
run_training_check=False,
expected_output_shape=(2, 2, 3),
)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 2,
"width": 2,
"data_format": "channels_first",
},
input_shape=(1, 3, 3, 4),
supports_masking=False,
run_training_check=False,
expected_output_shape=(1, 3, 2, 2),
)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 2,
"width": 2,
"data_format": "channels_first",
},
input_shape=(3, 3, 4),
supports_masking=False,
run_training_check=False,
expected_output_shape=(3, 2, 2),
)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
else:
input_shape = (12, 3, 8, 16)
inp = np.random.random(input_shape)
layer = layers.RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_random_crop_partial(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 8, 8, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 8, 8)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 8,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_height(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 10, 8, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 10, 8)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 10,
"width": 8,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_width(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 8, 18, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 8, 18)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 18,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_tf_data_compatibility(self):
layer = layers.RandomCrop(8, 9)
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
output = next(iter(ds)).numpy()
self.assertEqual(tuple(output.shape), output_shape)
def test_dict_input(self):
layer = layers.RandomCrop(
3, 3, data_format="channels_last", bounding_box_format="xyxy"
)
data = {
"images": np.random.random((2, 4, 5, 3)),
"labels": np.random.random((2, 7)),
"segmentation_masks": np.random.random((2, 4, 5, 7)),
"bounding_boxes": {
"boxes": np.array([[1, 2, 2, 3]]),
"labels": np.array([0]),
},
}
transformed_data = layer(data)
self.assertEqual(
data["images"].shape[:-1],
transformed_data["segmentation_masks"].shape[:-1],
)
self.assertAllClose(data["labels"], transformed_data["labels"])
self.assertEqual(data["bounding_boxes"]["boxes"].shape, (1, 4))
self.assertAllClose(
data["bounding_boxes"]["labels"],
transformed_data["bounding_boxes"]["labels"],
)
|
RandomCropTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/telemetry/migrations/0001_initial.py
|
{
"start": 184,
"end": 1321
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="BuildData",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("data", models.JSONField()),
],
options={
"verbose_name_plural": "Build data",
},
),
]
|
Migration
|
python
|
rq__rq
|
tests/test_group.py
|
{
"start": 264,
"end": 6660
}
|
class ____(RQTestCase):
job_1_data = Queue.prepare_data(say_hello, job_id='job1')
job_2_data = Queue.prepare_data(say_hello, job_id='job2')
def test_create_group(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [self.job_1_data, self.job_2_data])
assert isinstance(group, Group)
assert len(group.get_jobs()) == 2
q.empty()
def test_group_cleanup_with_no_jobs(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
assert len(group.get_jobs()) == 0
group.cleanup()
assert len(group.get_jobs()) == 0
q.empty()
def test_group_repr(self):
group = Group.create(name='foo', connection=self.connection)
assert group.__repr__() == 'Group(id=foo)'
def test_group_jobs(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
jobs = group.enqueue_many(q, [self.job_1_data, self.job_2_data])
self.assertCountEqual(group.get_jobs(), jobs)
q.empty()
def test_fetch_group(self):
q = Queue(connection=self.connection)
enqueued_group = Group.create(connection=self.connection)
enqueued_group.enqueue_many(q, [self.job_1_data, self.job_2_data])
fetched_group = Group.fetch(enqueued_group.name, self.connection)
self.assertCountEqual(enqueued_group.get_jobs(), fetched_group.get_jobs())
assert len(fetched_group.get_jobs()) == 2
q.empty()
def test_add_jobs(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [self.job_1_data, self.job_2_data])
job2 = group.enqueue_many(q, [self.job_1_data, self.job_2_data])[0]
assert job2 in group.get_jobs()
self.assertEqual(job2.group_id, group.name)
q.empty()
def test_jobs_added_to_group_key(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
jobs = group.enqueue_many(q, [self.job_1_data, self.job_2_data])
job_ids = [job.id for job in group.get_jobs()]
jobs = list({as_text(job) for job in self.connection.smembers(group.key)})
self.assertCountEqual(jobs, job_ids)
q.empty()
def test_group_id_added_to_jobs(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
jobs = group.enqueue_many(q, [self.job_1_data])
assert jobs[0].group_id == group.name
fetched_job = Job.fetch(jobs[0].id, connection=self.connection)
assert fetched_job.group_id == group.name
def test_deleted_jobs_removed_from_group(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [self.job_1_data, self.job_2_data])
job = group.get_jobs()[0]
job.delete()
group.cleanup()
redis_jobs = list({as_text(job) for job in self.connection.smembers(group.key)})
assert job.id not in redis_jobs
assert job not in group.get_jobs()
def test_group_added_to_registry(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [self.job_1_data])
redis_groups = {as_text(group) for group in self.connection.smembers('rq:groups')}
assert group.name in redis_groups
q.empty()
@pytest.mark.slow
def test_expired_jobs_removed_from_group(self):
q = Queue(connection=self.connection)
w = SimpleWorker([q], connection=q.connection)
short_lived_job = Queue.prepare_data(say_hello, result_ttl=1)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [short_lived_job, self.job_1_data])
w.work(burst=True, max_jobs=1)
sleep(2)
w.run_maintenance_tasks()
group.cleanup()
assert len(group.get_jobs()) == 1
assert self.job_1_data.job_id in [job.id for job in group.get_jobs()]
q.empty()
@pytest.mark.slow
def test_empty_group_removed_from_group_list(self):
q = Queue(connection=self.connection)
w = SimpleWorker([q], connection=q.connection)
short_lived_job = Queue.prepare_data(say_hello, result_ttl=1)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [short_lived_job])
w.work(burst=True, max_jobs=1)
sleep(2)
w.run_maintenance_tasks()
redis_groups = {as_text(group) for group in self.connection.smembers('rq:groups')}
assert group.name not in redis_groups
@pytest.mark.slow
def test_fetch_expired_group_raises_error(self):
q = Queue(connection=self.connection)
w = SimpleWorker([q], connection=q.connection)
short_lived_job = Queue.prepare_data(say_hello, result_ttl=1)
group = Group.create(connection=self.connection)
group.enqueue_many(q, [short_lived_job])
w.work(burst=True, max_jobs=1)
sleep(2)
w.run_maintenance_tasks()
self.assertRaises(NoSuchGroupError, Group.fetch, group.name, group.connection)
q.empty()
def test_get_group_key(self):
group = Group(name='foo', connection=self.connection)
self.assertEqual(Group.get_key(group.name), 'rq:group:foo')
def test_all_returns_all_groups(self):
q = Queue(connection=self.connection)
group1 = Group.create(name='group1', connection=self.connection)
Group.create(name='group2', connection=self.connection)
group1.enqueue_many(q, [self.job_1_data, self.job_2_data])
all_groups = Group.all(self.connection)
assert len(all_groups) == 1
assert 'group1' in [group.name for group in all_groups]
assert 'group2' not in [group.name for group in all_groups]
def test_all_deletes_missing_groups(self):
q = Queue(connection=self.connection)
group = Group.create(connection=self.connection)
jobs = group.enqueue_many(q, [self.job_1_data])
jobs[0].delete()
assert not self.connection.exists(Group.get_key(group.name))
assert Group.all(connection=self.connection) == []
|
TestGroup
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_flags.py
|
{
"start": 199,
"end": 548
}
|
class ____:
def test_get_classifiers(self, db_request):
# Clear out any existing flags added from migrations
db_request.db.query(AdminFlag).delete()
flag_a = AdminFlagFactory(id="flag-a")
flag_b = AdminFlagFactory(id="flag-b")
assert views.get_flags(db_request) == {"flags": [flag_a, flag_b]}
|
TestGetFlags
|
python
|
boto__boto3
|
tests/unit/dynamodb/test_transform.py
|
{
"start": 21057,
"end": 21349
}
|
class ____(unittest.TestCase):
def test_register(self):
base_classes = [object]
register_high_level_interface(base_classes)
# Check that the base classes are as expected
assert base_classes == [DynamoDBHighLevelResource, object]
|
TestRegisterHighLevelInterface
|
python
|
RaRe-Technologies__gensim
|
gensim/models/ldamodel.py
|
{
"start": 5017,
"end": 10882
}
|
class ____(utils.SaveLoad):
"""Encapsulate information for distributed computation of :class:`~gensim.models.ldamodel.LdaModel` objects.
Objects of this class are sent over the network, so try to keep them lean to
reduce traffic.
"""
def __init__(self, eta, shape, dtype=np.float32):
"""
Parameters
----------
eta : numpy.ndarray
The prior probabilities assigned to each term.
shape : tuple of (int, int)
Shape of the sufficient statistics: (number of topics to be found, number of terms in the vocabulary).
dtype : type
Overrides the numpy array default types.
"""
self.eta = eta.astype(dtype, copy=False)
self.sstats = np.zeros(shape, dtype=dtype)
self.numdocs = 0
self.dtype = dtype
def reset(self):
"""Prepare the state for a new EM iteration (reset sufficient stats)."""
self.sstats[:] = 0.0
self.numdocs = 0
def merge(self, other):
"""Merge the result of an E step from one node with that of another node (summing up sufficient statistics).
The merging is trivial and after merging all cluster nodes, we have the
exact same result as if the computation was run on a single node (no
approximation).
Parameters
----------
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
"""
assert other is not None
self.sstats += other.sstats
self.numdocs += other.numdocs
def blend(self, rhot, other, targetsize=None):
"""Merge the current state with another one using a weighted average for the sufficient statistics.
The number of documents is stretched in both state objects, so that they are of comparable magnitude.
This procedure corresponds to the stochastic gradient update from
`'Online Learning for LDA' by Hoffman et al.`_, see equations (5) and (9).
Parameters
----------
rhot : float
Weight of the `other` state in the computed average. A value of 0.0 means that `other`
is completely ignored. A value of 1.0 means `self` is completely ignored.
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
targetsize : int, optional
The number of documents to stretch both states to.
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# stretch the current model's expected n*phi counts to target size
if self.numdocs == 0 or targetsize == self.numdocs:
scale = 1.0
else:
scale = 1.0 * targetsize / self.numdocs
self.sstats *= (1.0 - rhot) * scale
# stretch the incoming n*phi counts to target size
if other.numdocs == 0 or targetsize == other.numdocs:
scale = 1.0
else:
logger.info("merging changes from %i documents into a model of %i documents", other.numdocs, targetsize)
scale = 1.0 * targetsize / other.numdocs
self.sstats += rhot * scale * other.sstats
self.numdocs = targetsize
def blend2(self, rhot, other, targetsize=None):
"""Merge the current state with another one using a weighted sum for the sufficient statistics.
In contrast to :meth:`~gensim.models.ldamodel.LdaState.blend`, the sufficient statistics are not scaled
prior to aggregation.
Parameters
----------
rhot : float
Unused.
other : :class:`~gensim.models.ldamodel.LdaState`
The state object with which the current one will be merged.
targetsize : int, optional
The number of documents to stretch both states to.
"""
assert other is not None
if targetsize is None:
targetsize = self.numdocs
# merge the two matrices by summing
self.sstats += other.sstats
self.numdocs = targetsize
def get_lambda(self):
"""Get the parameters of the posterior over the topics, also referred to as "the topics".
Returns
-------
numpy.ndarray
Parameters of the posterior probability over topics.
"""
return self.eta + self.sstats
def get_Elogbeta(self):
"""Get the log (posterior) probabilities for each topic.
Returns
-------
numpy.ndarray
Posterior probabilities for each topic.
"""
return dirichlet_expectation(self.get_lambda())
@classmethod
def load(cls, fname, *args, **kwargs):
"""Load a previously stored state from disk.
Overrides :class:`~gensim.utils.SaveLoad.load` by enforcing the `dtype` parameter
to ensure backwards compatibility.
Parameters
----------
fname : str
Path to file that contains the needed object.
args : object
Positional parameters to be propagated to class:`~gensim.utils.SaveLoad.load`
kwargs : object
Key-word parameters to be propagated to class:`~gensim.utils.SaveLoad.load`
Returns
-------
:class:`~gensim.models.ldamodel.LdaState`
The state loaded from the given file.
"""
result = super(LdaState, cls).load(fname, *args, **kwargs)
# dtype could be absent in old models
if not hasattr(result, 'dtype'):
result.dtype = np.float64 # float64 was implicitly used before (because it's the default in numpy)
logging.info("dtype was not set in saved %s file %s, assuming np.float64", result.__class__.__name__, fname)
return result
|
LdaState
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-powerbi/dagster_powerbi/components/power_bi_workspace/component.py
|
{
"start": 3022,
"end": 5567
}
|
class ____(AssetSpecUpdateKwargs, Resolvable):
for_dashboard: Optional[ResolvedTargetedPowerBITranslationFn] = None
for_report: Optional[ResolvedTargetedPowerBITranslationFn] = None
for_semantic_model: Optional[ResolvedTargetedPowerBITranslationFn] = None
# data sources are external assets, so only the key can be user-customized
for_data_source: Optional[ResolvedTargetedKeyOnlyPowerBITranslationFn] = None
def resolve_multilayer_translation(context: ResolutionContext, model):
"""The PowerBI translation schema supports defining global transforms
as well as per-content-type transforms. This resolver composes the
per-content-type transforms with the global transforms.
"""
info = TranslatorResolvingInfo(
asset_attributes=model,
resolution_context=context,
model_key="translation",
)
def _translation_fn(base_asset_spec: AssetSpec, data: PowerBITranslatorData):
processed_spec = info.get_asset_spec(
base_asset_spec,
{
"data": data,
"spec": base_asset_spec,
},
)
nested_translation_fns = resolve_fields(
model=model,
resolved_cls=PowerBIAssetArgs,
context=context.with_scope(
**{
"data": data,
"spec": processed_spec,
}
),
)
for_semantic_model = nested_translation_fns.get("for_semantic_model")
for_dashboard = nested_translation_fns.get("for_dashboard")
for_report = nested_translation_fns.get("for_report")
for_data_source = nested_translation_fns.get("for_data_source")
if data.content_type == PowerBIContentType.SEMANTIC_MODEL and for_semantic_model:
return for_semantic_model(processed_spec, data)
if data.content_type == PowerBIContentType.DASHBOARD and for_dashboard:
return for_dashboard(processed_spec, data)
if data.content_type == PowerBIContentType.REPORT and for_report:
return for_report(processed_spec, data)
if data.content_type == PowerBIContentType.DATA_SOURCE and for_data_source:
return for_data_source(processed_spec, data)
return processed_spec
return _translation_fn
ResolvedMultilayerTranslationFn: TypeAlias = Annotated[
TranslationFn,
Resolver(
resolve_multilayer_translation,
model_field_type=Union[str, PowerBIAssetArgs.model()],
),
]
@dataclass
|
PowerBIAssetArgs
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/tests/models.py
|
{
"start": 941,
"end": 1012
}
|
class ____(Model2C):
field4 = models.CharField(max_length=30)
|
Model2D
|
python
|
xlwings__xlwings
|
xlwings/rest/api.py
|
{
"start": 726,
"end": 12499
}
|
class ____(PathConverter):
regex = ".*?"
if sys.platform.startswith("darwin"):
# Hack to allow leading slashes on Mac
api.url_map.converters["path"] = EverythingConverter
def get_book_object(fullname=None, name_or_ix=None, app_ix=None):
assert fullname is None or name_or_ix is None
if fullname:
try:
return xw.Book(fullname)
except Exception as e:
logger.exception(str(e))
abort(500, str(e))
elif name_or_ix:
if name_or_ix.isdigit():
name_or_ix = int(name_or_ix)
app = xw.apps[int(app_ix)] if app_ix else xw.apps.active
try:
return app.books[name_or_ix]
except KeyError as e:
logger.exception(str(e))
abort(500, "Couldn't find Book: " + str(e))
except Exception as e:
logger.exception(str(e))
abort(500, str(e))
def get_sheet_object(book, name_or_id):
if name_or_id.isdigit():
name_or_id = int(name_or_id)
return book.sheets[name_or_id]
@api.route("/apps", methods=["GET"])
def get_apps():
return jsonify(apps=[serialize_app(app) for app in xw.apps])
@api.route("/apps/<pid>", methods=["GET"])
def get_app(pid):
return jsonify(serialize_app(xw.apps[int(pid)]))
@api.route("/apps/<pid>/books", methods=["GET"])
@api.route("/books", methods=["GET"])
def get_books(pid=None):
books = xw.apps[int(pid)].books if pid else xw.books
return jsonify(books=[serialize_book(book) for book in books])
@api.route("/apps/<pid>/books/<book_name_or_ix>", methods=["GET"])
@api.route("/books/<book_name_or_ix>", methods=["GET"])
@api.route("/book/<path:fullname_or_name>", methods=["GET"])
def get_book(book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
return jsonify(serialize_book(book))
@api.route("/apps/<pid>/books/<book_name_or_ix>/sheets", methods=["GET"])
@api.route("/books/<book_name_or_ix>/sheets", methods=["GET"])
@api.route("/book/<path:fullname_or_name>/sheets", methods=["GET"])
def get_sheets(book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
return jsonify(sheets=[serialize_sheet(sheet) for sheet in book.sheets])
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>", methods=["GET"]
)
@api.route("/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>", methods=["GET"])
@api.route("/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>", methods=["GET"])
def get_sheet(sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(serialize_sheet(sheet))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/range",
methods=["GET"],
)
@api.route("/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/range", methods=["GET"])
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/range", methods=["GET"]
)
def get_range(sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(serialize_range(sheet.used_range))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/range/<address>",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/range/<address>",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/range/<address>",
methods=["GET"],
)
def get_range_address(
address, sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
options = {k: v for k, v in request.args.items()}
return jsonify(serialize_range(sheet.range(address).options(**options)))
@api.route("/apps/<pid>/books/<book_name_or_ix>/names", methods=["GET"])
@api.route("/books/<book_name_or_ix>/names", methods=["GET"])
@api.route("/book/<path:fullname_or_name>/names", methods=["GET"])
def get_book_names(book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
return jsonify(names=[serialize_name(name) for name in book.names])
@api.route("/apps/<pid>/books/<book_name_or_ix>/names/<name>", methods=["GET"])
@api.route("/books/<book_name_or_ix>/names/<name>", methods=["GET"])
@api.route("/book/<path:fullname_or_name>/names/<name>", methods=["GET"])
def get_book_name(name, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
return jsonify(serialize_name(book.names[name]))
@api.route("/apps/<pid>/books/<book_name_or_ix>/names/<name>/range", methods=["GET"])
@api.route("/books/<book_name_or_ix>/names/<name>/range", methods=["GET"])
@api.route("/book/<path:fullname_or_name>/names/<name>/range", methods=["GET"])
def get_book_name_range(name, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
return jsonify(serialize_range(book.names[name].refers_to_range))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names",
methods=["GET"],
)
@api.route("/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names", methods=["GET"])
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/names", methods=["GET"]
)
def get_sheet_names(
sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(names=[serialize_name(name) for name in sheet.names])
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>",
methods=["GET"],
)
def get_sheet_name(
sheet_name_or_ix,
sheet_scope_name,
book_name_or_ix=None,
fullname_or_name=None,
pid=None,
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(serialize_name(sheet.names[sheet_scope_name]))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>/range",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>/range",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/names/<sheet_scope_name>/range",
methods=["GET"],
)
def get_sheet_name_range(
sheet_name_or_ix,
sheet_scope_name,
book_name_or_ix=None,
fullname_or_name=None,
pid=None,
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(serialize_range(sheet.names[sheet_scope_name].refers_to_range))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/charts",
methods=["GET"],
)
@api.route("/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/charts", methods=["GET"])
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/charts", methods=["GET"]
)
def get_charts(sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(charts=[serialize_chart(chart) for chart in sheet.charts])
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/charts/<chart_name_or_ix>",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/charts/<chart_name_or_ix>",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/charts/<chart_name_or_ix>",
methods=["GET"],
)
def get_chart(
sheet_name_or_ix,
chart_name_or_ix,
book_name_or_ix=None,
fullname_or_name=None,
pid=None,
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
chart = int(chart_name_or_ix) if chart_name_or_ix.isdigit() else chart_name_or_ix
return jsonify(serialize_chart(sheet.charts[chart]))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/shapes",
methods=["GET"],
)
@api.route("/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/shapes", methods=["GET"])
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/shapes", methods=["GET"]
)
def get_shapes(sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(shapes=[serialize_shape(shp) for shp in sheet.shapes])
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/shapes/<shape_name_or_ix>",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/shapes/<shape_name_or_ix>",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/shapes/<shape_name_or_ix>",
methods=["GET"],
)
def get_shape(
sheet_name_or_ix,
shape_name_or_ix,
book_name_or_ix=None,
fullname_or_name=None,
pid=None,
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
shape = int(shape_name_or_ix) if shape_name_or_ix.isdigit() else shape_name_or_ix
return jsonify(serialize_shape(sheet.shapes[shape]))
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/pictures",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/pictures", methods=["GET"]
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/pictures", methods=["GET"]
)
def get_pictures(
sheet_name_or_ix, book_name_or_ix=None, fullname_or_name=None, pid=None
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
return jsonify(pictures=[serialize_picture(pic) for pic in sheet.pictures])
@api.route(
"/apps/<pid>/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/pictures/<picture_name_or_ix>",
methods=["GET"],
)
@api.route(
"/books/<book_name_or_ix>/sheets/<sheet_name_or_ix>/pictures/<picture_name_or_ix>",
methods=["GET"],
)
@api.route(
"/book/<path:fullname_or_name>/sheets/<sheet_name_or_ix>/pictures/<picture_name_or_ix>",
methods=["GET"],
)
def get_picture(
sheet_name_or_ix,
picture_name_or_ix,
book_name_or_ix=None,
fullname_or_name=None,
pid=None,
):
book = get_book_object(fullname_or_name, book_name_or_ix, pid)
sheet = get_sheet_object(book, sheet_name_or_ix)
pic = (
int(picture_name_or_ix) if picture_name_or_ix.isdigit() else picture_name_or_ix
)
return jsonify(serialize_picture(sheet.pictures[pic]))
def run(host=None, port=None, debug=None, **options):
"""
Run Flask development server
"""
api.run(host=host, port=port, debug=debug, **options)
if __name__ == "__main__":
run(debug=True)
|
EverythingConverter
|
python
|
PyCQA__pylint
|
tests/functional/t/too/too_many_ancestors.py
|
{
"start": 385,
"end": 539
}
|
class ____(Iiii): # [too-many-ancestors]
pass
# https://github.com/pylint-dev/pylint/issues/4166
# https://github.com/pylint-dev/pylint/issues/4415
|
Jjjj
|
python
|
plotly__plotly.py
|
tests/test_optional/test_graph_objs/test_skipped_b64_keys.py
|
{
"start": 158,
"end": 2751
}
|
class ____(NumpyTestUtilsMixin, TestCase):
def test_np_geojson(self):
normal_coordinates = [
[
[-87, 35],
[-87, 30],
[-85, 30],
[-85, 35],
]
]
numpy_coordinates = np.array(normal_coordinates)
data = [
{
"type": "choropleth",
"locations": ["AL"],
"featureidkey": "properties.id",
"z": np.array([10]),
"geojson": {
"type": "Feature",
"properties": {"id": "AL"},
"geometry": {"type": "Polygon", "coordinates": numpy_coordinates},
},
}
]
fig = go.Figure(data=data)
assert (
json.loads(fig.to_json())["data"][0]["geojson"]["geometry"]["coordinates"]
== normal_coordinates
)
def test_np_layers(self):
layout = {
"mapbox": {
"layers": [
{
"sourcetype": "geojson",
"type": "line",
"line": {"dash": np.array([2.5, 1])},
"source": {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "LineString",
"coordinates": np.array(
[[0.25, 52], [0.75, 50]]
),
},
}
],
},
},
],
"center": {"lon": 0.5, "lat": 51},
},
}
data = [{"type": "scattermap"}]
fig = go.Figure(data=data, layout=layout)
assert (fig.layout["mapbox"]["layers"][0]["line"]["dash"] == (2.5, 1)).all()
assert json.loads(fig.to_json())["layout"]["mapbox"]["layers"][0]["source"][
"features"
][0]["geometry"]["coordinates"] == [[0.25, 52], [0.75, 50]]
def test_np_range(self):
layout = {"xaxis": {"range": np.array([0, 1])}}
fig = go.Figure(data=[{"type": "scatter"}], layout=layout)
assert json.loads(fig.to_json())["layout"]["xaxis"]["range"] == [0, 1]
|
TestShouldNotUseBase64InUnsupportedKeys
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyflakes/future_annotations.py
|
{
"start": 188,
"end": 742
}
|
class ____:
x: int
y: int
@classmethod
def a(cls) -> Foo:
return cls(x=0, y=0)
@classmethod
def b(cls) -> "Foo":
return cls(x=0, y=0)
@classmethod
def c(cls) -> Bar:
return cls(x=0, y=0)
@classmethod
def d(cls) -> Fruit:
return cls(x=0, y=0)
def f(x: int) -> List[int]:
y = List[int]()
y.append(x)
return y
x: Tuple[int, ...] = (1, 2)
def f(param: "Optional[Callable]" = None) -> "None":
pass
def f(param: Optional["Sequence"] = None) -> "None":
pass
|
Foo
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/styles/test_write_cell_style.py
|
{
"start": 295,
"end": 801
}
|
class ____(unittest.TestCase):
"""
Test the Styles _write_cell_style() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_style(self):
"""Test the _write_cell_style() method"""
self.styles._write_cell_style()
exp = """<cellStyle name="Normal" xfId="0" builtinId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
|
TestWriteCellStyle
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1535357,
"end": 1536151
}
|
class ____(sgqlc.types.Type, Node):
"""Represents an 'unassigned' event on any assignable object."""
__schema__ = github_schema
__field_names__ = ("actor", "assignable", "assignee", "created_at")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
assignable = sgqlc.types.Field(sgqlc.types.non_null(Assignable), graphql_name="assignable")
"""Identifies the assignable associated with the event."""
assignee = sgqlc.types.Field("Assignee", graphql_name="assignee")
"""Identifies the user or mannequin that was unassigned."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
|
UnassignedEvent
|
python
|
conda__conda
|
conda/core/path_actions.py
|
{
"start": 39619,
"end": 40592
}
|
class ____(RemoveFromPrefixPathAction):
@classmethod
def create_actions(cls, transaction_context, linked_package_data, target_prefix):
return tuple(
cls(transaction_context, linked_package_data, target_prefix, trgt)
for trgt in linked_package_data.files
if bool(_MENU_RE.match(trgt))
)
def __init__(
self, transaction_context, linked_package_data, target_prefix, target_short_path
):
super().__init__(
transaction_context, linked_package_data, target_prefix, target_short_path
)
def execute(self):
log.log(TRACE, "removing menu for %s ", self.target_prefix)
make_menu(self.target_prefix, self.target_short_path, remove=True)
def reverse(self):
log.log(TRACE, "re-creating menu for %s ", self.target_prefix)
make_menu(self.target_prefix, self.target_short_path, remove=False)
def cleanup(self):
pass
|
RemoveMenuAction
|
python
|
numba__numba
|
numba/core/types/abstract.py
|
{
"start": 9863,
"end": 10382
}
|
class ____(IterableType):
"""
Base class for all iterator types.
Derived classes should implement the *yield_type* attribute.
"""
def __init__(self, name, **kwargs):
super(IteratorType, self).__init__(name, **kwargs)
@property
@abstractmethod
def yield_type(self):
"""
The type of values yielded by the iterator.
"""
# This is a property to avoid recursivity (for pickling)
@property
def iterator_type(self):
return self
|
IteratorType
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/stores.py
|
{
"start": 8458,
"end": 9061
}
|
class ____(InMemoryBaseStore[bytes]):
"""In-memory store for bytes.
Attributes:
store: The underlying dictionary that stores the key-value pairs.
Examples:
```python
from langchain.storage import InMemoryByteStore
store = InMemoryByteStore()
store.mset([("key1", b"value1"), ("key2", b"value2")])
store.mget(["key1", "key2"])
# [b'value1', b'value2']
store.mdelete(["key1"])
list(store.yield_keys())
# ['key2']
list(store.yield_keys(prefix="k"))
# ['key2']
```
"""
|
InMemoryByteStore
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/timestamp.py
|
{
"start": 280,
"end": 1815
}
|
class ____(datetime.datetime):
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any]
def __new__(cls, *args, **kw): # datetime is immutable
# type: (Any, Any) -> Any
return datetime.datetime.__new__(cls, *args, **kw)
def __deepcopy__(self, memo):
# type: (Any) -> Any
ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second)
ts._yaml = copy.deepcopy(self._yaml)
return ts
def replace(
self,
year=None,
month=None,
day=None,
hour=None,
minute=None,
second=None,
microsecond=None,
tzinfo=True,
fold=None,
):
# type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold)
ts._yaml = copy.deepcopy(self._yaml)
return ts
|
TimeStamp
|
python
|
apache__airflow
|
providers/http/tests/unit/http/hooks/test_http.py
|
{
"start": 27513,
"end": 35681
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="http_default", conn_type="http", host="test:8080/", extra='{"bearer": "test"}'
)
)
create_connection_without_db(Connection(conn_id="http_empty_conn", conn_type="http"))
@pytest.mark.asyncio
async def test_do_api_call_async_non_retryable_error(self, aioresponse):
"""Test api call asynchronously with non retryable error."""
hook = HttpAsyncHook(method="GET")
aioresponse.get("http://httpbin.org/non_existent_endpoint", status=400)
with (
pytest.raises(AirflowException, match="400:Bad Request"),
mock.patch.dict(
"os.environ",
AIRFLOW_CONN_HTTP_DEFAULT="http://httpbin.org/",
),
):
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="non_existent_endpoint")
@pytest.mark.asyncio
async def test_do_api_call_async_retryable_error(self, caplog, aioresponse):
"""Test api call asynchronously with retryable error."""
caplog.set_level(logging.WARNING, logger="airflow.providers.http.hooks.http")
hook = HttpAsyncHook(method="GET")
aioresponse.get("http://httpbin.org/non_existent_endpoint", status=500, repeat=True)
with (
pytest.raises(AirflowException, match="500:Internal Server Error"),
mock.patch.dict(
"os.environ",
AIRFLOW_CONN_HTTP_DEFAULT="http://httpbin.org/",
),
):
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="non_existent_endpoint")
assert "[Try 3 of 3] Request to http://httpbin.org/non_existent_endpoint failed" in caplog.text
@pytest.mark.asyncio
async def test_do_api_call_async_unknown_method(self):
"""Test api call asynchronously for unknown http method."""
hook = HttpAsyncHook(method="NOPE")
json = {"existing_cluster_id": "xxxx-xxxxxx-xxxxxx"}
with pytest.raises(AirflowException, match="Unexpected HTTP Method: NOPE"):
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="non_existent_endpoint", data=json)
@pytest.mark.asyncio
async def test_async_post_request(self):
"""Test api call asynchronously for POST request."""
hook = HttpAsyncHook()
with aioresponses() as m:
m.post(
"http://test:8080/v1/test",
status=200,
payload='{"status":{"status": 200}}',
reason="OK",
)
async with aiohttp.ClientSession() as session:
resp = await hook.run(session=session, endpoint="v1/test")
assert resp.status == 200
@pytest.mark.asyncio
async def test_async_post_request_with_error_code(self):
"""Test api call asynchronously for POST request with error."""
hook = HttpAsyncHook()
with aioresponses() as m:
m.post(
"http://test:8080/v1/test",
status=418,
payload='{"status":{"status": 418}}',
reason="I am teapot",
)
async with aiohttp.ClientSession() as session:
with pytest.raises(AirflowException):
await hook.run(session=session, endpoint="v1/test")
@pytest.mark.asyncio
async def test_async_request_uses_connection_extra(self):
"""Test api call asynchronously with a connection that has extra field."""
connection_extra = {"bearer": "test"}
with aioresponses() as m:
m.post(
"http://test:8080/v1/test",
status=200,
payload='{"status":{"status": 200}}',
reason="OK",
)
hook = HttpAsyncHook()
with mock.patch("aiohttp.ClientSession.post", new_callable=mock.AsyncMock) as mocked_function:
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="v1/test")
headers = mocked_function.call_args.kwargs.get("headers")
assert all(
key in headers and headers[key] == value for key, value in connection_extra.items()
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"setup_connections_with_extras",
[
{
"bearer": "test",
"proxies": {"http": "http://proxy:80", "https": "https://proxy:80"},
"timeout": 60,
"verify": False,
"allow_redirects": False,
"max_redirects": 3,
"trust_env": False,
}
],
indirect=True,
)
async def test_async_request_uses_connection_extra_with_requests_parameters(
self, setup_connections_with_extras
):
"""Test api call asynchronously with a connection that has extra field."""
connection_extra = {"bearer": "test"}
proxy = {"http": "http://proxy:80", "https": "https://proxy:80"}
hook = HttpAsyncHook(http_conn_id="http_conn_with_extras")
with aioresponses() as m:
m.post(
"http://test:8080/v1/test",
status=200,
payload='{"status":{"status": 200}}',
reason="OK",
)
with mock.patch("aiohttp.ClientSession.post", new_callable=mock.AsyncMock) as mocked_function:
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="v1/test")
headers = mocked_function.call_args.kwargs.get("headers")
assert all(
key in headers and headers[key] == value for key, value in connection_extra.items()
)
assert mocked_function.call_args.kwargs.get("proxy") == proxy
assert mocked_function.call_args.kwargs.get("timeout") == 60
assert mocked_function.call_args.kwargs.get("verify_ssl") is False
assert mocked_function.call_args.kwargs.get("allow_redirects") is False
assert mocked_function.call_args.kwargs.get("max_redirects") == 3
assert mocked_function.call_args.kwargs.get("trust_env") is False
@pytest.mark.asyncio
async def test_build_request_url_from_connection(self):
conn = get_airflow_connection()
schema = conn.schema or "http" # default to http
hook = HttpAsyncHook()
with aioresponses() as m:
m.post(
f"{schema}://test:8080/v1/test",
status=200,
payload='{"status":{"status": 200}}',
reason="OK",
)
with mock.patch("aiohttp.ClientSession.post", new_callable=mock.AsyncMock) as mocked_function:
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="v1/test")
assert mocked_function.call_args.args[0] == f"{schema}://{conn.host}v1/test"
@pytest.mark.asyncio
async def test_build_request_url_from_endpoint_param(self):
hook = HttpAsyncHook(http_conn_id="http_empty_conn")
with aioresponses() as m:
m.post(
"http://test.com:8080/v1/test", status=200, payload='{"status":{"status": 200}}', reason="OK"
)
with (
mock.patch("aiohttp.ClientSession.post", new_callable=mock.AsyncMock) as mocked_function,
):
async with aiohttp.ClientSession() as session:
await hook.run(session=session, endpoint="test.com:8080/v1/test")
assert mocked_function.call_args.args[0] == "http://test.com:8080/v1/test"
|
TestHttpAsyncHook
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 141148,
"end": 143912
}
|
class ____(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.percentage_below(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testOneUpdate(self):
with self.cached_session():
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
self.evaluate(variables.local_variables_initializer())
self.evaluate([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = self.evaluate([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
@test_util.run_deprecated_v1
def testSomePresentOneUpdate(self):
with self.cached_session():
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.percentage_below(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.percentage_below(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.percentage_below(
values, 1, weights=weights, name='low')
self.evaluate(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
self.evaluate([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = self.evaluate([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
|
PcntBelowThreshTest
|
python
|
encode__starlette
|
starlette/middleware/sessions.py
|
{
"start": 354,
"end": 3572
}
|
class ____:
def __init__(
self,
app: ASGIApp,
secret_key: str | Secret,
session_cookie: str = "session",
max_age: int | None = 14 * 24 * 60 * 60, # 14 days, in seconds
path: str = "/",
same_site: Literal["lax", "strict", "none"] = "lax",
https_only: bool = False,
domain: str | None = None,
) -> None:
self.app = app
self.signer = itsdangerous.TimestampSigner(str(secret_key))
self.session_cookie = session_cookie
self.max_age = max_age
self.path = path
self.security_flags = "httponly; samesite=" + same_site
if https_only: # Secure flag can be used with HTTPS only
self.security_flags += "; secure"
if domain is not None:
self.security_flags += f"; domain={domain}"
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] not in ("http", "websocket"): # pragma: no cover
await self.app(scope, receive, send)
return
connection = HTTPConnection(scope)
initial_session_was_empty = True
if self.session_cookie in connection.cookies:
data = connection.cookies[self.session_cookie].encode("utf-8")
try:
data = self.signer.unsign(data, max_age=self.max_age)
scope["session"] = json.loads(b64decode(data))
initial_session_was_empty = False
except BadSignature:
scope["session"] = {}
else:
scope["session"] = {}
async def send_wrapper(message: Message) -> None:
if message["type"] == "http.response.start":
if scope["session"]:
# We have session data to persist.
data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
data = self.signer.sign(data)
headers = MutableHeaders(scope=message)
header_value = "{session_cookie}={data}; path={path}; {max_age}{security_flags}".format(
session_cookie=self.session_cookie,
data=data.decode("utf-8"),
path=self.path,
max_age=f"Max-Age={self.max_age}; " if self.max_age else "",
security_flags=self.security_flags,
)
headers.append("Set-Cookie", header_value)
elif not initial_session_was_empty:
# The session has been cleared.
headers = MutableHeaders(scope=message)
header_value = "{session_cookie}={data}; path={path}; {expires}{security_flags}".format(
session_cookie=self.session_cookie,
data="null",
path=self.path,
expires="expires=Thu, 01 Jan 1970 00:00:00 GMT; ",
security_flags=self.security_flags,
)
headers.append("Set-Cookie", header_value)
await send(message)
await self.app(scope, receive, send_wrapper)
|
SessionMiddleware
|
python
|
pytorch__pytorch
|
torch/nn/modules/activation.py
|
{
"start": 14153,
"end": 15307
}
|
class ____(Module):
r"""Applies the Hardswish function, element-wise.
Method described in the paper: `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.
Hardswish is defined as:
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardswish.png
Examples::
>>> m = nn.Hardswish()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["inplace"]
inplace: bool
def __init__(self, inplace: bool = False) -> None:
super().__init__()
self.inplace = inplace
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.hardswish(input, self.inplace)
|
Hardswish
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/clsregistry.py
|
{
"start": 5434,
"end": 7806
}
|
class ____(_ClsRegistryToken):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
__slots__ = "on_remove", "contents", "__weakref__"
contents: Set[weakref.ref[Type[Any]]]
on_remove: CallableReference[Optional[Callable[[], None]]]
def __init__(
self,
classes: Iterable[Type[Any]],
on_remove: Optional[Callable[[], None]] = None,
):
self.on_remove = on_remove
self.contents = {
weakref.ref(item, self._remove_item) for item in classes
}
_registries.add(self)
def remove_item(self, cls: Type[Any]) -> None:
self._remove_item(weakref.ref(cls))
def __iter__(self) -> Generator[Optional[Type[Any]], None, None]:
return (ref() for ref in self.contents)
def attempt_get(self, path: List[str], key: str) -> Type[Any]:
if len(self.contents) > 1:
raise exc.InvalidRequestError(
'Multiple classes found for path "%s" '
"in the registry of this declarative "
"base. Please use a fully module-qualified path."
% (".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref: weakref.ref[Type[Any]]) -> None:
self.contents.discard(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item: Type[Any]) -> None:
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208] and [ticket:10782]
modules = {
cls.__module__
for cls in [ref() for ref in list(self.contents)]
if cls is not None
}
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table."
% (item.__module__, item.__name__)
)
self.contents.add(weakref.ref(item, self._remove_item))
|
_MultipleClassMarker
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/freshness_tests/test_internal_freshness.py
|
{
"start": 2134,
"end": 9872
}
|
class ____:
def test_asset_decorator_with_time_window_freshness_policy(self) -> None:
"""Can we define an asset from decorator with a time window freshness policy?"""
@asset(
freshness_policy=TimeWindowFreshnessPolicy.from_timedeltas(
fail_window=timedelta(minutes=10), warn_window=timedelta(minutes=5)
)
)
def asset_with_internal_freshness_policy():
pass
spec = asset_with_internal_freshness_policy.get_asset_spec()
policy = spec.freshness_policy
assert policy is not None
assert isinstance(policy, TimeWindowFreshnessPolicy)
assert policy.fail_window == SerializableTimeDelta.from_timedelta(timedelta(minutes=10))
assert policy.warn_window == SerializableTimeDelta.from_timedelta(timedelta(minutes=5))
def test_asset_spec_with_time_window_freshness_policy(self) -> None:
"""Can we define an asset spec with a time window freshness policy?"""
def create_spec_and_verify_policy(asset_key: str, fail_window: timedelta, warn_window=None):
asset = dg.AssetSpec(
key=dg.AssetKey(asset_key),
freshness_policy=FreshnessPolicy.time_window(
fail_window=fail_window, warn_window=warn_window
),
)
asset_node_snaps = _get_asset_node_snaps_from_definitions(
dg.Definitions(assets=[asset])
)
snap = asset_node_snaps[0]
policy = snap.freshness_policy
assert isinstance(policy, TimeWindowFreshnessPolicy)
assert policy.fail_window == SerializableTimeDelta.from_timedelta(fail_window)
if warn_window:
assert policy.warn_window == SerializableTimeDelta.from_timedelta(warn_window)
else:
assert policy.warn_window is None
# Test without warn window
create_spec_and_verify_policy("asset1", fail_window=timedelta(minutes=10))
# Test with optional warn window
create_spec_and_verify_policy(
"asset2", fail_window=timedelta(minutes=10), warn_window=timedelta(minutes=5)
)
def test_apply_freshness_policy_to_asset_spec(self) -> None:
"""Can we apply a freshness policy to an asset spec?"""
def assert_freshness_policy(spec, expected_fail_window, expected_warn_window=None):
policy = spec.freshness_policy
assert isinstance(policy, TimeWindowFreshnessPolicy)
assert policy.fail_window == SerializableTimeDelta.from_timedelta(expected_fail_window)
if expected_warn_window:
assert policy.warn_window == SerializableTimeDelta.from_timedelta(
expected_warn_window
)
else:
assert policy.warn_window is None
asset_spec = dg.AssetSpec(key="foo")
asset_spec = apply_freshness_policy(
asset_spec,
FreshnessPolicy.time_window(
fail_window=timedelta(minutes=10), warn_window=timedelta(minutes=5)
),
)
assert_freshness_policy(
asset_spec,
expected_fail_window=timedelta(minutes=10),
expected_warn_window=timedelta(minutes=5),
)
# Overwrite the policy with a new one
asset_spec = apply_freshness_policy(
asset_spec, FreshnessPolicy.time_window(fail_window=timedelta(minutes=60))
)
assert_freshness_policy(asset_spec, expected_fail_window=timedelta(minutes=60))
# Don't overwrite existing metadata
spec_with_metadata = dg.AssetSpec(key="bar", metadata={"existing": "metadata"})
spec_with_metadata = apply_freshness_policy(
spec_with_metadata,
FreshnessPolicy.time_window(fail_window=timedelta(minutes=60)),
)
assert spec_with_metadata.metadata.get("existing") == "metadata"
assert_freshness_policy(
spec_with_metadata,
expected_fail_window=timedelta(minutes=60),
expected_warn_window=None,
)
def test_map_asset_specs_apply_time_window_freshness_policy(self) -> None:
"""Can we map apply_freshness_policy over a selection of assets and asset specs?"""
@asset
def foo_asset():
pass
asset_specs = [foo_asset, dg.AssetSpec(key="bar"), dg.AssetSpec(key="baz")]
defs: dg.Definitions = dg.Definitions(assets=asset_specs)
freshness_policy = TimeWindowFreshnessPolicy.from_timedeltas(
fail_window=timedelta(minutes=10), warn_window=timedelta(minutes=5)
)
mapped_defs = defs.map_resolved_asset_specs(
func=lambda spec: apply_freshness_policy(spec, freshness_policy)
)
assets_and_specs = mapped_defs.assets
assert assets_and_specs is not None
for asset_or_spec in assets_and_specs:
assert isinstance(asset_or_spec, (dg.AssetsDefinition, dg.AssetSpec))
spec = (
asset_or_spec.get_asset_spec()
if isinstance(asset_or_spec, dg.AssetsDefinition)
else asset_or_spec
)
policy = spec.freshness_policy
assert isinstance(policy, TimeWindowFreshnessPolicy)
assert policy.fail_window == SerializableTimeDelta.from_timedelta(timedelta(minutes=10))
assert policy.warn_window == SerializableTimeDelta.from_timedelta(timedelta(minutes=5))
def test_time_window_freshness_policy_fail_window_validation(self) -> None:
with pytest.raises(CheckError):
FreshnessPolicy.time_window(fail_window=timedelta(seconds=59))
with pytest.raises(CheckError):
FreshnessPolicy.time_window(
fail_window=timedelta(seconds=59), warn_window=timedelta(seconds=59)
)
# exactly 1 minute is ok
FreshnessPolicy.time_window(fail_window=timedelta(seconds=60))
FreshnessPolicy.time_window(
fail_window=timedelta(seconds=61), warn_window=timedelta(minutes=1)
)
def test_attach_time_window_freshness_policy_overwrite_existing(self) -> None:
"""Does overwrite_existing respect existing freshness policy on an asset?"""
@asset
def asset_no_policy():
pass
@asset(freshness_policy=FreshnessPolicy.time_window(fail_window=timedelta(hours=24)))
def asset_with_policy():
pass
defs = dg.Definitions(assets=[asset_no_policy, asset_with_policy])
# If no policy is attached, overwrite with new policy containing fail window of 10 minutes
mapped_defs = defs.map_asset_specs(
func=lambda spec: apply_freshness_policy(
spec,
FreshnessPolicy.time_window(fail_window=timedelta(minutes=10)),
overwrite_existing=False,
)
)
specs = mapped_defs.get_all_asset_specs()
# Should see new policy applied to asset without existing policy
spec_no_policy = next(spec for spec in specs if spec.key == dg.AssetKey("asset_no_policy"))
assert spec_no_policy.freshness_policy is not None
assert spec_no_policy.freshness_policy == FreshnessPolicy.time_window(
fail_window=timedelta(minutes=10)
)
spec_with_policy = next(
spec for spec in specs if spec.key == dg.AssetKey("asset_with_policy")
)
assert spec_with_policy.freshness_policy is not None
assert spec_with_policy.freshness_policy == FreshnessPolicy.time_window(
fail_window=timedelta(hours=24)
)
|
TestTimeWindowFreshnessPolicy
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_check_commands.py
|
{
"start": 8598,
"end": 14582
}
|
class ____:
"""Test suite for --ignore-exclude-lists flag functionality."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_ignore_exclude_lists_flag_with_symbol_excluded_symbol(self):
"""Test --ignore-exclude-lists with --symbol on a known excluded symbol."""
# Test with a symbol we know is in EXCLUDE_MISSING_DOCSTRINGS
result_normal = self.runner.invoke(check, ["docstrings", "--symbol", "dagster.BoolSource"])
result_ignore = self.runner.invoke(
check, ["docstrings", "--symbol", "dagster.BoolSource", "--ignore-exclude-lists"]
)
# Normal mode should skip the excluded symbol
assert result_normal.exit_code == 0
assert (
"Symbol 'dagster.BoolSource' is in the exclude list - skipping validation"
in result_normal.output
)
assert "✓ Symbol excluded from validation" in result_normal.output
# Ignore mode should actually validate the symbol
assert result_ignore.exit_code in [0, 1] # May pass or fail validation
assert "Validating docstring for: dagster.BoolSource" in result_ignore.output
# Should not show exclusion message
assert "Symbol 'dagster.BoolSource' is in the exclude list" not in result_ignore.output
def test_ignore_exclude_lists_flag_with_symbol_non_excluded_symbol(self):
"""Test --ignore-exclude-lists with --symbol on a non-excluded symbol."""
# Test with a symbol that should have good docstrings
result_normal = self.runner.invoke(check, ["docstrings", "--symbol", "dagster.asset"])
result_ignore = self.runner.invoke(
check, ["docstrings", "--symbol", "dagster.asset", "--ignore-exclude-lists"]
)
# Both should behave the same for non-excluded symbols
assert result_normal.exit_code == 0
assert result_ignore.exit_code == 0
assert "Validating docstring for: dagster.asset" in result_normal.output
assert "Validating docstring for: dagster.asset" in result_ignore.output
def test_ignore_exclude_lists_flag_with_package_shows_more_issues(self):
"""Test --ignore-exclude-lists with --package shows more validation issues."""
# Use a small package that likely has excluded symbols
result_normal = self.runner.invoke(
check, ["docstrings", "--package", "dagster._core.errors"]
)
result_ignore = self.runner.invoke(
check, ["docstrings", "--package", "dagster._core.errors", "--ignore-exclude-lists"]
)
# Both should complete
assert result_normal.exit_code in [0, 1]
assert result_ignore.exit_code in [0, 1]
# Normal mode may show exclusion counts
if "symbols excluded from validation" in result_normal.output:
# Ignore mode should not show exclusion counts
assert "symbols excluded from validation" not in result_ignore.output
def test_ignore_exclude_lists_flag_with_all_shows_more_issues(self):
"""Test --ignore-exclude-lists with --all shows more validation issues."""
result_normal = self.runner.invoke(check, ["docstrings", "--all"])
result_ignore = self.runner.invoke(check, ["docstrings", "--all", "--ignore-exclude-lists"])
# Both should complete
assert result_normal.exit_code in [0, 1]
assert result_ignore.exit_code in [0, 1]
# Both should show overall summary
assert "Overall Summary:" in result_normal.output
assert "Overall Summary:" in result_ignore.output
# Normal mode should show exclusion information if excludes exist
normal_has_exclusions = "symbols excluded from validation" in result_normal.output
# Ignore mode should not show exclusion information
assert "symbols excluded from validation" not in result_ignore.output
# If there were exclusions in normal mode, ignore mode should process more symbols
if normal_has_exclusions:
# Extract symbol counts (this is a bit fragile but useful for validation)
import re
normal_match = re.search(r"(\d+) symbols processed", result_normal.output)
ignore_match = re.search(r"(\d+) symbols processed", result_ignore.output)
if normal_match and ignore_match:
normal_count = int(normal_match.group(1))
ignore_count = int(ignore_match.group(1))
# Ignore mode should process at least as many symbols as normal mode
assert ignore_count >= normal_count
def test_ignore_exclude_lists_flag_consistency_across_modes(self):
"""Test that --ignore-exclude-lists behaves consistently across different modes."""
# Test a known excluded symbol in both single-symbol and package mode
symbol_result = self.runner.invoke(
check, ["docstrings", "--symbol", "dagster.BoolSource", "--ignore-exclude-lists"]
)
# Find which package contains dagster.BoolSource and test that package
package_result = self.runner.invoke(
check, ["docstrings", "--package", "dagster", "--ignore-exclude-lists"]
)
# Both should complete
assert symbol_result.exit_code in [0, 1]
assert package_result.exit_code in [0, 1]
# Neither should show exclusion messages
assert "is in the exclude list" not in symbol_result.output
assert "excluded from validation" not in package_result.output
def test_ignore_exclude_lists_flag_help_text(self):
"""Test that --ignore-exclude-lists flag appears in help text."""
result = self.runner.invoke(check, ["docstrings", "--help"])
assert result.exit_code == 0
assert "--ignore-exclude-lists" in result.output
assert "Ignore exclude lists and show all docstring issues" in result.output
|
TestIgnoreExcludeListsFlag
|
python
|
davidhalter__parso
|
parso/normalizer.py
|
{
"start": 4207,
"end": 5153
}
|
class ____:
code: int
message: str
def __init__(self, normalizer):
self._normalizer = normalizer
def is_issue(self, node):
raise NotImplementedError()
def get_node(self, node):
return node
def _get_message(self, message, node):
if message is None:
message = self.message
if message is None:
raise ValueError("The message on the class is not set.")
return message
def add_issue(self, node, code=None, message=None):
if code is None:
code = self.code
if code is None:
raise ValueError("The error code on the class is not set.")
message = self._get_message(message, node)
self._normalizer.add_issue(node, code, message)
def feed_node(self, node):
if self.is_issue(node):
issue_node = self.get_node(node)
self.add_issue(issue_node)
|
Rule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/control_flow_ops_benchmark.py
|
{
"start": 1239,
"end": 3893
}
|
class ____(test.Benchmark):
"""Checks the runtime performance of outputting all intermediates."""
NUM_INTERMEDIATES = 1000
NUM_ITERS = 500
NUM_WARM_UP_ITERS = 50
def _create_cond(self, x):
def branch_fn():
# Use a random value so the adds can't be constant folded.
return x + sum(random_ops.random_normal([])
for _ in range(self.NUM_INTERMEDIATES))
# Use a dynamic predicate to make sure the cond isn't constant folded.
return cond.cond(math_ops.not_equal(x, -1),
branch_fn, lambda: 0.0)
def _benchmark_defun(self):
"""Benchmarks cond in a defun."""
@def_function.function
def cond_fn(x):
return self._create_cond(x)
# Warm up
for _ in range(self.NUM_WARM_UP_ITERS):
cond_fn(0.0)
start_time = time.time()
for _ in range(self.NUM_ITERS):
cond_fn(0.0)
self.report_benchmark(
wall_time=time.time() - start_time,
iters=self.NUM_ITERS)
def _benchmark_graph(self):
"""Benchmarks cond in legacy graph mode."""
with context.graph_mode():
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
cond_val = self._create_cond(x)
with session.Session() as sess:
cond_fn = sess.make_callable(cond_val, [x])
# Warm up
for _ in range(self.NUM_WARM_UP_ITERS):
cond_fn(0.0)
start_time = time.time()
for _ in range(self.NUM_ITERS):
cond_fn(0.0)
self.report_benchmark(
wall_time=time.time() - start_time,
iters=self.NUM_ITERS)
def benchmark_cond_v1_defun(self):
old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = False
self._benchmark_defun()
control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val
def benchmark_cond_v2_defun(self):
old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
self._benchmark_defun()
control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val
def benchmark_cond_v1_graph(self):
old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = False
self._benchmark_graph()
control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val
def benchmark_cond_v2_graph(self):
old_val = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
self._benchmark_graph()
control_flow_util.ENABLE_CONTROL_FLOW_V2 = old_val
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
CondWithManyIntermediatesBenchmark
|
python
|
joke2k__faker
|
faker/providers/phone_number/cs_CZ/__init__.py
|
{
"start": 49,
"end": 1417
}
|
class ____(PhoneNumberProvider):
# Phone numbers
# https://cs.wikipedia.org/wiki/Telefonn%C3%AD_%C4%8D%C3%ADslo
# https://www.srovnejto.cz/blog/jake-jsou-telefonni-predvolby-do-zahranici/
formats = (
# prefix 00420
# 601-608
"00420 601 ### ###",
"00420 602 ### ###",
"00420 603 ### ###",
"00420 604 ### ###",
"00420 605 ### ###",
"00420 606 ### ###",
"00420 607 ### ###",
"00420 608 ### ###",
# 702-705
"00420 702 ### ###",
"00420 703 ### ###",
"00420 704 ### ###",
"00420 705 ### ###",
# 720-739
"00420 72# ### ###",
"00420 73# ### ###",
# 770-779
"00420 77# ### ###",
# 790-799
"00420 79# ### ###",
# prefix +420
# 601-608
"+420 601 ### ###",
"+420 602 ### ###",
"+420 603 ### ###",
"+420 604 ### ###",
"+420 605 ### ###",
"+420 606 ### ###",
"+420 607 ### ###",
"+420 608 ### ###",
# 702-705
"+420 702 ### ###",
"+420 703 ### ###",
"+420 704 ### ###",
"+420 705 ### ###",
# 720-739
"+420 72# ### ###",
"+420 73# ### ###",
# 770-779
"+420 77# ### ###",
# 790-799
"+420 79# ### ###",
)
|
Provider
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_lazy_relations.py
|
{
"start": 50854,
"end": 53316
}
|
class ____(fixtures.MappedTest):
"""ORM-level test for [ticket:3788]"""
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column("id1", Integer, primary_key=True),
Column("id2", Integer, primary_key=True),
)
Table(
"b_sameorder",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id1", Integer),
Column("a_id2", Integer),
ForeignKeyConstraint(["a_id1", "a_id2"], ["a.id1", "a.id2"]),
)
Table(
"b_differentorder",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id1", Integer),
Column("a_id2", Integer),
ForeignKeyConstraint(["a_id1", "a_id2"], ["a.id1", "a.id2"]),
)
@classmethod
def setup_classes(cls):
class A(cls.Basic):
pass
class B(cls.Basic):
pass
def test_use_get_sameorder(self):
self.mapper_registry.map_imperatively(self.classes.A, self.tables.a)
m_b = self.mapper_registry.map_imperatively(
self.classes.B,
self.tables.b_sameorder,
properties={"a": relationship(self.classes.A)},
)
configure_mappers()
is_true(m_b.relationships.a.strategy.use_get)
def test_use_get_reverseorder(self):
self.mapper_registry.map_imperatively(self.classes.A, self.tables.a)
m_b = self.mapper_registry.map_imperatively(
self.classes.B,
self.tables.b_differentorder,
properties={"a": relationship(self.classes.A)},
)
configure_mappers()
is_true(m_b.relationships.a.strategy.use_get)
def test_dont_use_get_pj_is_different(self):
self.mapper_registry.map_imperatively(self.classes.A, self.tables.a)
m_b = self.mapper_registry.map_imperatively(
self.classes.B,
self.tables.b_sameorder,
properties={
"a": relationship(
self.classes.A,
primaryjoin=and_(
self.tables.a.c.id1 == self.tables.b_sameorder.c.a_id1,
self.tables.a.c.id2 == 12,
),
)
},
)
configure_mappers()
is_false(m_b.relationships.a.strategy.use_get)
|
CompositeSimpleM2OTest
|
python
|
mlflow__mlflow
|
mlflow/gateway/providers/ai21labs.py
|
{
"start": 316,
"end": 3204
}
|
class ____(BaseProvider):
NAME = "AI21Labs"
CONFIG_TYPE = AI21LabsConfig
def __init__(self, config: EndpointConfig) -> None:
super().__init__(config)
if config.model.config is None or not isinstance(config.model.config, AI21LabsConfig):
raise TypeError(f"Unexpected config type {config.model.config}")
self.ai21labs_config: AI21LabsConfig = config.model.config
self.headers = {"Authorization": f"Bearer {self.ai21labs_config.ai21labs_api_key}"}
self.base_url = f"https://api.ai21.com/studio/v1/{self.config.model.name}/"
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
key_mapping = {
"stop": "stopSequences",
"n": "numResults",
"max_tokens": "maxTokens",
}
for k1, k2 in key_mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
if payload.get("stream", False):
raise AIGatewayException(
status_code=422,
detail="Setting the 'stream' parameter to 'true' is not supported with the MLflow "
"Gateway.",
)
payload = rename_payload_keys(payload, key_mapping)
resp = await send_request(
headers=self.headers,
base_url=self.base_url,
path="complete",
payload=payload,
)
# Response example (https://docs.ai21.com/reference/j2-complete-ref)
# ```
# {
# "id": "7921a78e-d905-c9df-27e3-88e4831e3c3b",
# "prompt": {
# "text": "I will"
# },
# "completions": [
# {
# "data": {
# "text": " complete this"
# },
# "finishReason": {
# "reason": "length",
# "length": 2
# }
# }
# ]
# }
# ```
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=self.config.model.name,
choices=[
completions.Choice(
index=idx,
text=c["data"]["text"],
finish_reason=c["finishReason"]["reason"],
)
for idx, c in enumerate(resp["completions"])
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
|
AI21LabsProvider
|
python
|
gevent__gevent
|
src/gevent/tests/test__monkey.py
|
{
"start": 133,
"end": 6581
}
|
class ____(SubscriberCleanupMixin, unittest.TestCase):
maxDiff = None
def setUp(self):
super(TestMonkey, self).setUp()
self.all_events = []
self.addSubscriber(self.all_events.append)
self.orig_saved = orig_saved = {}
for k, v in monkey.saved.items():
orig_saved[k] = v.copy()
def tearDown(self):
monkey.saved = self.orig_saved
del self.orig_saved
del self.all_events
super(TestMonkey, self).tearDown()
def test_time(self):
import time
from gevent import time as gtime
self.assertIs(time.sleep, gtime.sleep)
def test_thread(self):
import _thread as thread
import threading
from gevent import thread as gthread
self.assertIs(thread.start_new_thread, gthread.start_new_thread)
if sys.version_info[:2] < (3, 13):
self.assertIs(threading._start_new_thread, gthread.start_new_thread)
else:
self.assertIs(threading._start_joinable_thread, gthread.start_joinable_thread)
# Event patched by default
self.assertTrue(monkey.is_object_patched('threading', 'Event'))
if sys.version_info[0] == 2:
from gevent import threading as gthreading
from gevent.event import Event as GEvent
self.assertIs(threading._sleep, gthreading._sleep)
self.assertTrue(monkey.is_object_patched('threading', '_Event'))
self.assertIs(threading._Event, GEvent)
def test_socket(self):
import socket
from gevent import socket as gevent_socket
self.assertIs(socket.create_connection, gevent_socket.create_connection)
def test_os(self):
import os
import types
from gevent import os as gos
for name in ('fork', 'forkpty'):
if hasattr(os, name):
attr = getattr(os, name)
self.assertNotIn('built-in', repr(attr))
self.assertNotIsInstance(attr, types.BuiltinFunctionType)
self.assertIsInstance(attr, types.FunctionType)
self.assertIs(attr, getattr(gos, name))
def test_saved(self):
self.assertTrue(monkey.saved)
for modname, objects in monkey.saved.items():
self.assertTrue(monkey.is_module_patched(modname))
for objname in objects:
self.assertTrue(monkey.is_object_patched(modname, objname))
def test_patch_subprocess_twice(self):
Popen = monkey.get_original('subprocess', 'Popen')
self.assertNotIn('gevent', repr(Popen))
self.assertIs(Popen, monkey.get_original('subprocess', 'Popen'))
monkey.patch_subprocess()
self.assertIs(Popen, monkey.get_original('subprocess', 'Popen'))
def test_patch_twice_warnings_events(self):
import warnings
all_events = self.all_events
with warnings.catch_warnings(record=True) as issued_warnings:
# Patch again, triggering just one warning, for
# a different set of arguments. Because we're going to False instead of
# turning something on, nothing is actually done, no events are issued.
monkey.patch_all(os=False, extra_kwarg=42)
self.assertEqual(len(issued_warnings), 1)
self.assertIn('more than once', str(issued_warnings[0].message))
self.assertEqual(all_events, [])
# Same warning again, but still nothing is done.
del issued_warnings[:]
monkey.patch_all(os=False)
self.assertEqual(len(issued_warnings), 1)
self.assertIn('more than once', str(issued_warnings[0].message))
self.assertEqual(all_events, [])
self.orig_saved['_gevent_saved_patch_all_module_settings'] = monkey.saved[
'_gevent_saved_patch_all_module_settings']
# Make sure that re-patching did not change the monkey.saved
# attribute, overwriting the original functions.
if 'logging' in monkey.saved and 'logging' not in self.orig_saved:
# some part of the warning or unittest machinery imports logging
self.orig_saved['logging'] = monkey.saved['logging']
self.assertEqual(self.orig_saved, monkey.saved)
# Make sure some problematic attributes stayed correct.
# NOTE: This was only a problem if threading was not previously imported.
for k, v in monkey.saved['threading'].items():
self.assertNotIn('gevent', str(v), (k, v))
def test_patch_events(self):
from gevent import events
from gevent.testing import verify
all_events = self.all_events
def veto(event):
if isinstance(event, events.GeventWillPatchModuleEvent) and event.module_name == 'ssl':
raise events.DoNotPatch
self.addSubscriber(veto)
monkey.saved = {} # Reset
monkey.patch_all(thread=False, select=False, extra_kwarg=42) # Go again
self.assertIsInstance(all_events[0], events.GeventWillPatchAllEvent)
self.assertEqual({'extra_kwarg': 42}, all_events[0].patch_all_kwargs)
verify.verifyObject(events.IGeventWillPatchAllEvent, all_events[0])
self.assertIsInstance(all_events[1], events.GeventWillPatchModuleEvent)
verify.verifyObject(events.IGeventWillPatchModuleEvent, all_events[1])
self.assertIsInstance(all_events[2], events.GeventDidPatchModuleEvent)
verify.verifyObject(events.IGeventWillPatchModuleEvent, all_events[1])
self.assertIsInstance(all_events[-2], events.GeventDidPatchBuiltinModulesEvent)
verify.verifyObject(events.IGeventDidPatchBuiltinModulesEvent, all_events[-2])
self.assertIsInstance(all_events[-1], events.GeventDidPatchAllEvent)
verify.verifyObject(events.IGeventDidPatchAllEvent, all_events[-1])
for e in all_events:
self.assertFalse(isinstance(e, events.GeventDidPatchModuleEvent)
and e.module_name == 'ssl')
def test_patch_queue(self):
import queue
import gevent.queue as gq
# pylint:disable=no-member
self.assertIs(queue.SimpleQueue, gq.SimpleQueue)
self.assertIs(queue.LifoQueue, gq.LifoQueue)
self.assertIs(queue.Queue, gq.Queue)
self.assertIs(queue.PriorityQueue, gq.PriorityQueue)
if __name__ == '__main__':
unittest.main()
|
TestMonkey
|
python
|
pytorch__pytorch
|
torch/testing/_internal/opinfo/core.py
|
{
"start": 11551,
"end": 25633
}
|
class ____:
    """Class holds alias information. For example, torch.abs ->
    torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
    """

    def __init__(self, alias_name):
        # Public name of the alias as it appears in the torch namespace.
        self.name = alias_name
        # Resolve the alias name to its callable via the file-local _getattr_qual helper.
        self.op = _getattr_qual(torch, alias_name)
        # Method and inplace variants may not exist for every alias, so default to None.
        self.method_variant = getattr(torch.Tensor, alias_name, None)
        self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)

    def __call__(self, *args, **kwargs):
        # Calling the alias object delegates directly to the resolved operator.
        return self.op(*args, **kwargs)
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operating testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo understand how to test its forward
# mode AD or NNC support that's typically handled automatically just by
# defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But the point is the above. OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return an iterable of SampleInputs (see the class description
# above). Each SampleInput defines an "input", "args", "kwargs", an
# "output_process_fn_grad" function, the "broadcasts_input" bool and a
# "name".
#
# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
# environment for efficiency and correctness. As such remember to set the
# "requires_grad" flag on the inputs **after** performing any transformations
# on them.
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/main/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# Sample inputs are designed to be used with many tests, some
# that are very time consuming, so they should be a small
# set with small tensors. An elaborated set of sample inputs
# can be specified using the "reference_inputs_func" attribute.
# The "reference inputs" for an operation are an extended
# set of sample inputs that can more exhaustively test an
# operator. They are used by only a few tests that are careful
# not to take too long to run. Adding reference inputs
# is highly encouraged!
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they thrown are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInputs class, which contains
# a SampleInput (see above) and data about the expected error.
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see the below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad and complex grad and gradgrad and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
# - that the operator throws the correct errors (if error_inputs is defined)
# - that the operator produces the same results as a NumPy reference (if ref is defined)
# - that the operator produces the same results as a NumPy reference on an extended
# set of "reference inputs" (if both ref and reference_inputs_func are defined)
# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
# ref is defined, because they effectively autogenerate reference inputs)
# - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
# The exception to this is if reference testing is sufficient, or if
# the operation belongs to an OpInfo subclass that has more exhaustive
# operator testing. Elementwise unary and elementwise binary operators,
# in particular, usually don't require additional testing beyond
# writing an Opinfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg,
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary or binary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
# implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in utils.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
@dataclass
|
AliasInfo
|
python
|
huggingface__transformers
|
src/transformers/models/zoedepth/modeling_zoedepth.py
|
{
"start": 38460,
"end": 39012
}
|
class ____(nn.Module):
    """Small two-layer MLP head that maps input features to domain logits."""

    def __init__(self, in_features, out_features) -> None:
        super().__init__()
        # The hidden layer keeps the same width as the input features.
        hidden_features = in_features
        self.linear1 = nn.Linear(in_features, hidden_features)
        self.activation = nn.ReLU()
        self.linear2 = nn.Linear(hidden_features, out_features)

    def forward(self, hidden_state):
        # project -> ReLU -> project, returning the raw (unnormalized) logits
        return self.linear2(self.activation(self.linear1(hidden_state)))
|
ZoeDepthMLPClassifier
|
python
|
huggingface__transformers
|
src/transformers/utils/import_utils.py
|
{
"start": 59608,
"end": 77403
}
|
class ____(ModuleType):
    """
    Module class that surfaces all objects but only performs associated imports when the objects are requested.
    """

    # Very heavily inspired by optuna.integration._IntegrationModule
    # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
    def __init__(
        self,
        name: str,
        module_file: str,
        import_structure: IMPORT_STRUCTURE_T,
        module_spec: importlib.machinery.ModuleSpec | None = None,
        extra_objects: dict[str, object] | None = None,
        explicit_import_shortcut: dict[str, list[str]] | None = None,
    ):
        """Index the import structure so attribute access can import lazily.

        Two layouts of ``import_structure`` are supported: a backend-aware one
        whose keys are frozensets of backend requirements, and a legacy flat
        ``{module: [objects]}`` mapping.
        """
        super().__init__(name)
        # Maps an object or module name to the list of backends it is missing.
        self._object_missing_backend = {}
        self._explicit_import_shortcut = explicit_import_shortcut if explicit_import_shortcut else {}

        # Frozenset keys signal the backend-aware layout: {frozenset(backends): {module: [objects]}}.
        if any(isinstance(key, frozenset) for key in import_structure):
            self._modules = set()
            self._class_to_module = {}
            self.__all__ = []
            _import_structure = {}
            for backends, module in import_structure.items():
                missing_backends = []
                # This ensures that if a module is importable, then all other keys of the module are importable.
                # As an example, in module.keys() we might have the following:
                #
                # dict_keys(['models.nllb_moe.configuration_nllb_moe', 'models.sew_d.configuration_sew_d'])
                #
                # with this, we don't only want to be able to import these explicitly, we want to be able to import
                # every intermediate module as well. Therefore, this is what is returned:
                #
                # {
                #     'models.nllb_moe.configuration_nllb_moe',
                #     'models.sew_d.configuration_sew_d',
                #     'models',
                #     'models.sew_d', 'models.nllb_moe'
                # }
                module_keys = set(
                    chain(*[[k.rsplit(".", i)[0] for i in range(k.count(".") + 1)] for k in list(module.keys())])
                )
                for backend in backends:
                    # NOTE: `callable` here shadows the builtin within this loop.
                    if backend in BACKENDS_MAPPING:
                        callable, _ = BACKENDS_MAPPING[backend]
                    else:
                        # Versioned requirement strings (e.g. "torch>=2.0") become Backend objects.
                        if any(key in backend for key in ["=", "<", ">"]):
                            backend = Backend(backend)
                            callable = backend.is_satisfied
                        else:
                            raise ValueError(
                                f"Backend should be defined in the BACKENDS_MAPPING. Offending backend: {backend}"
                            )
                    # A backend whose availability check itself fails counts as missing.
                    try:
                        if not callable():
                            missing_backends.append(backend)
                    except (ModuleNotFoundError, RuntimeError):
                        missing_backends.append(backend)
                self._modules = self._modules.union(module_keys)
                for key, values in module.items():
                    if missing_backends:
                        self._object_missing_backend[key] = missing_backends
                    for value in values:
                        self._class_to_module[value] = key
                        if missing_backends:
                            self._object_missing_backend[value] = missing_backends
                    _import_structure.setdefault(key, []).extend(values)
                # Needed for autocompletion in an IDE
                self.__all__.extend(module_keys | set(chain(*module.values())))
            self.__file__ = module_file
            self.__spec__ = module_spec
            self.__path__ = [os.path.dirname(module_file)]
            self._objects = {} if extra_objects is None else extra_objects
            self._name = name
            self._import_structure = _import_structure
        # This can be removed once every exportable object has a `require()` require.
        else:
            # Legacy flat layout: {module: [objects]} with no backend information.
            self._modules = set(import_structure.keys())
            self._class_to_module = {}
            for key, values in import_structure.items():
                for value in values:
                    self._class_to_module[value] = key
            # Needed for autocompletion in an IDE
            self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
            self.__file__ = module_file
            self.__spec__ = module_spec
            self.__path__ = [os.path.dirname(module_file)]
            self._objects = {} if extra_objects is None else extra_objects
            self._name = name
            self._import_structure = import_structure

    # Needed for autocompletion in an IDE
    def __dir__(self):
        result = super().__dir__()
        # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
        # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
        for attr in self.__all__:
            if attr not in result:
                result.append(attr)
        return result

    def __getattr__(self, name: str) -> Any:
        # 1) Extra objects registered at construction time win outright.
        if name in self._objects:
            return self._objects[name]
        # 2) Objects with missing backends get a Placeholder that raises on instantiation.
        if name in self._object_missing_backend:
            missing_backends = self._object_missing_backend[name]

            class Placeholder(metaclass=DummyObject):
                _backends = missing_backends

                def __init__(self, *args, **kwargs):
                    requires_backends(self, missing_backends)

                def call(self, *args, **kwargs):
                    pass

            Placeholder.__name__ = name
            # Give the placeholder a plausible fully-qualified module path for error messages.
            if name not in self._class_to_module:
                module_name = f"transformers.{name}"
            else:
                module_name = self._class_to_module[name]
                if not module_name.startswith("transformers."):
                    module_name = f"transformers.{module_name}"
            Placeholder.__module__ = module_name

            value = Placeholder
        # 3) Known classes: import the declaring module and pull the attribute off it.
        elif name in self._class_to_module:
            try:
                module = self._get_module(self._class_to_module[name])
                value = getattr(module, name)
            except (ModuleNotFoundError, RuntimeError, AttributeError) as e:
                # V5: If trying to import a *TokenizerFast symbol, transparently fall back to the
                # non-Fast symbol from the same module when available. This lets us keep only one
                # backend tokenizer class while preserving legacy public names.
                if name.endswith("TokenizerFast"):
                    fallback_name = name[:-4]
                    # Prefer importing the module that declares the fallback symbol if known
                    try:
                        if fallback_name in self._class_to_module:
                            fb_module = self._get_module(self._class_to_module[fallback_name])
                            fallback_value = getattr(fb_module, fallback_name)
                        else:
                            module = self._get_module(self._class_to_module[name])
                            fallback_value = getattr(module, fallback_name)
                        setattr(self, fallback_name, fallback_value)
                        value = fallback_value
                    except Exception:
                        # If we can't find the fallback here, try converter logic as a last resort
                        # before giving up
                        value = None
                    # Try converter mapping for Fast tokenizers that don't exist
                    if value is None and name.endswith("TokenizerFast"):
                        lookup_name = name[:-4]
                        try:
                            from ..convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS

                            if lookup_name in SLOW_TO_FAST_CONVERTERS:
                                converter_class = SLOW_TO_FAST_CONVERTERS[lookup_name]
                                converter_base_name = converter_class.__name__.replace("Converter", "")
                                preferred_tokenizer_name = f"{converter_base_name}Tokenizer"
                                candidate_names = [preferred_tokenizer_name]
                                # Collect every other tokenizer sharing the same converter as candidates.
                                for tokenizer_name, tokenizer_converter in SLOW_TO_FAST_CONVERTERS.items():
                                    if tokenizer_converter is converter_class and tokenizer_name != lookup_name:
                                        if tokenizer_name not in candidate_names:
                                            candidate_names.append(tokenizer_name)
                                # Try to import the preferred candidate directly
                                import importlib

                                for candidate_name in candidate_names:
                                    base_tokenizer_class = None
                                    # Try to derive module path from tokenizer name (e.g., "AlbertTokenizer" -> "albert")
                                    # Remove "Tokenizer" suffix and convert to lowercase
                                    # NOTE(review): "Tokenizer" is 9 characters, so [:-10] drops one extra
                                    # character ("AlbertTokenizer" -> "Alber"); the failed import is silently
                                    # absorbed by the except below — TODO confirm intended slice length.
                                    if candidate_name.endswith("Tokenizer"):
                                        model_name = candidate_name[:-10].lower()  # Remove "Tokenizer"
                                        module_path = f"transformers.models.{model_name}.tokenization_{model_name}"
                                        try:
                                            module = importlib.import_module(module_path)
                                            base_tokenizer_class = getattr(module, candidate_name)
                                        except Exception:
                                            pass
                                    # Fallback: try via _class_to_module
                                    if base_tokenizer_class is None and candidate_name in self._class_to_module:
                                        try:
                                            alias_module = self._get_module(self._class_to_module[candidate_name])
                                            base_tokenizer_class = getattr(alias_module, candidate_name)
                                        except Exception:
                                            continue
                                    # If we still don't have base_tokenizer_class, skip this candidate
                                    if base_tokenizer_class is None:
                                        continue
                                    # If we got here, we have base_tokenizer_class
                                    value = base_tokenizer_class
                                    setattr(self, candidate_name, base_tokenizer_class)
                                    if lookup_name != candidate_name:
                                        setattr(self, lookup_name, value)
                                    setattr(self, name, value)
                                    break
                        except Exception:
                            pass
                    if value is None:
                        raise ModuleNotFoundError(
                            f"Could not import module '{name}'. Are this object's requirements defined correctly?"
                        ) from e
                else:
                    raise ModuleNotFoundError(
                        f"Could not import module '{name}'. Are this object's requirements defined correctly?"
                    ) from e
        # 4) Submodules: import and return the module object itself.
        elif name in self._modules:
            try:
                value = self._get_module(name)
            except (ModuleNotFoundError, RuntimeError) as e:
                raise ModuleNotFoundError(
                    f"Could not import module '{name}'. Are this object's requirements defined correctly?"
                ) from e
        # 5) Unknown names: try tokenizer aliasing tricks, then explicit shortcuts.
        else:
            # V5: If a *TokenizerFast symbol is requested but not present in the import structure,
            # try to resolve to the corresponding non-Fast symbol's module if available.
            if name.endswith("TokenizerFast"):
                fallback_name = name[:-4]
                if fallback_name in self._class_to_module:
                    try:
                        fb_module = self._get_module(self._class_to_module[fallback_name])
                        value = getattr(fb_module, fallback_name)
                        setattr(self, fallback_name, value)
                        setattr(self, name, value)
                        return value
                    except Exception:
                        pass
            # V5: If a tokenizer class doesn't exist, check if it should alias to another tokenizer
            # via the converter mapping (e.g., FNetTokenizer -> AlbertTokenizer via AlbertConverter)
            value = None
            if name.endswith("Tokenizer") or name.endswith("TokenizerFast"):
                # Strip "Fast" suffix for converter lookup if present
                lookup_name = name[:-4] if name.endswith("TokenizerFast") else name
                try:
                    # Lazy import to avoid circular dependencies
                    from ..convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS

                    # Check if this tokenizer has a converter mapping
                    if lookup_name in SLOW_TO_FAST_CONVERTERS:
                        converter_class = SLOW_TO_FAST_CONVERTERS[lookup_name]
                        # Find which tokenizer class uses the same converter (reverse lookup)
                        # Prefer the tokenizer that matches the converter name pattern
                        # (e.g., AlbertConverter -> AlbertTokenizer)
                        converter_base_name = converter_class.__name__.replace("Converter", "")
                        preferred_tokenizer_name = f"{converter_base_name}Tokenizer"
                        # Try preferred tokenizer first
                        candidate_names = [preferred_tokenizer_name]
                        # Then try all other tokenizers with the same converter
                        for tokenizer_name, tokenizer_converter in SLOW_TO_FAST_CONVERTERS.items():
                            if tokenizer_converter is converter_class and tokenizer_name != lookup_name:
                                if tokenizer_name not in candidate_names:
                                    candidate_names.append(tokenizer_name)
                        # Try to import one of the candidate tokenizers
                        for candidate_name in candidate_names:
                            if candidate_name in self._class_to_module:
                                try:
                                    alias_module = self._get_module(self._class_to_module[candidate_name])
                                    base_tokenizer_class = getattr(alias_module, candidate_name)
                                    value = base_tokenizer_class
                                    # Cache both names for future imports
                                    setattr(self, candidate_name, base_tokenizer_class)
                                    if lookup_name != candidate_name:
                                        setattr(self, lookup_name, value)
                                    setattr(self, name, value)
                                    break
                                except Exception:
                                    # If this candidate fails, try the next one
                                    continue
                            else:
                                # Candidate not in _class_to_module - might need recursive resolution
                                # Try importing it directly to trigger lazy loading
                                try:
                                    # Try to get it from transformers module to trigger lazy loading
                                    transformers_module = sys.modules.get("transformers")
                                    if transformers_module and hasattr(transformers_module, candidate_name):
                                        base_tokenizer_class = getattr(transformers_module, candidate_name)
                                        value = base_tokenizer_class
                                        if lookup_name != candidate_name:
                                            setattr(self, lookup_name, value)
                                        setattr(self, name, value)
                                        break
                                except Exception:
                                    continue
                except (ImportError, AttributeError):
                    pass
            # Explicit shortcuts map a module key to extra names that should resolve to it.
            if value is None:
                for key, values in self._explicit_import_shortcut.items():
                    if name in values:
                        value = self._get_module(key)
                        break
            if value is None:
                raise AttributeError(f"module {self.__name__} has no attribute {name}")

        # Cache the resolved value so __getattr__ is not hit again for this name.
        setattr(self, name, value)
        return value

    def _get_module(self, module_name: str):
        # Perform the actual relative import of a submodule of this package.
        try:
            return importlib.import_module("." + module_name, self.__name__)
        except Exception as e:
            raise e

    def __reduce__(self):
        # Support pickling by re-creating the lazy module from its constructor arguments.
        return (self.__class__, (self._name, self.__file__, self._import_structure))
|
_LazyModule
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/airflow_core/test_dag_processor.py
|
{
"start": 969,
"end": 32212
}
|
class ____:
"""Tests DAG processor."""
@pytest.mark.parametrize(
("airflow_version", "num_docs"),
[
("2.2.0", 0),
("2.3.0", 1),
],
)
def test_only_exists_on_new_airflow_versions(self, airflow_version, num_docs):
"""Standalone Dag Processor was only added from Airflow 2.3 onwards."""
docs = render_chart(
values={
"airflowVersion": airflow_version,
"dagProcessor": {"enabled": True},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert len(docs) == num_docs
@pytest.mark.parametrize(
("airflow_version", "num_docs"),
[
("2.10.4", 0),
("3.0.0", 1),
],
)
def test_enabled_by_airflow_version(self, airflow_version, num_docs):
"""Tests that Dag Processor is enabled by default with Airflow 3"""
docs = render_chart(
values={"airflowVersion": airflow_version},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert len(docs) == num_docs
@pytest.mark.parametrize(
("airflow_version", "enabled"),
[
("2.10.4", False),
("2.10.4", True),
("3.0.0", False),
("3.0.0", True),
],
)
def test_enabled_explicit(self, airflow_version, enabled):
"""Tests that Dag Processor can be enabled/disabled regardless of version"""
docs = render_chart(
values={"airflowVersion": airflow_version, "dagProcessor": {"enabled": enabled}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
if enabled:
assert len(docs) == 1
else:
assert len(docs) == 0
def test_can_be_disabled(self):
"""Standalone Dag Processor is disabled by default."""
docs = render_chart(
values={"dagProcessor": {"enabled": False}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert len(docs) == 0
def test_disable_wait_for_migration(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"waitForMigrations": {"enabled": False},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
actual = jmespath.search(
"spec.template.spec.initContainers[?name=='wait-for-airflow-migrations']", docs[0]
)
assert actual is None
def test_wait_for_migration_security_contexts_are_configurable(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"waitForMigrations": {
"enabled": True,
"securityContexts": {
"container": {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
},
},
},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.initContainers[0].securityContext", docs[0]) == {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
}
def test_should_add_extra_containers(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"extraContainers": [
{"name": "{{ .Chart.Name }}", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[-1]", docs[0]) == {
"name": "airflow",
"image": "test-registry/test-repo:test-tag",
}
def test_should_template_extra_containers(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"extraContainers": [{"name": "{{ .Release.Name }}-test-container"}],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[-1]", docs[0]) == {
"name": "release-name-test-container"
}
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.initContainers[-1]", docs[0]) == {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
}
def test_should_template_extra_init_containers(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"extraInitContainers": [{"name": "{{ .Release.Name }}-test-init-container"}],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.initContainers[-1]", docs[0]) == {
"name": "release-name-test-init-container"
}
def test_should_add_extra_volume_and_extra_volume_mount(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"extraVolumes": [{"name": "test-volume-{{ .Chart.Name }}", "emptyDir": {}}],
"extraVolumeMounts": [
{"name": "test-volume-{{ .Chart.Name }}", "mountPath": "/opt/test"}
],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.volumes[1].name", docs[0]) == "test-volume-airflow"
assert (
jmespath.search("spec.template.spec.containers[0].volumeMounts[0].name", docs[0])
== "test-volume-airflow"
)
assert (
jmespath.search("spec.template.spec.initContainers[0].volumeMounts[0].name", docs[0])
== "test-volume-airflow"
)
def test_should_add_global_volume_and_global_volume_mount(self):
docs = render_chart(
values={
"dagProcessor": {"enabled": True},
"volumes": [{"name": "test-volume", "emptyDir": {}}],
"volumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.volumes[1].name", docs[0]) == "test-volume"
assert (
jmespath.search("spec.template.spec.containers[0].volumeMounts[0].name", docs[0]) == "test-volume"
)
assert (
jmespath.search("spec.template.spec.initContainers[0].volumeMounts[0].name", docs[0])
== "test-volume"
)
def test_should_add_extraEnvs(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"env": [
{"name": "TEST_ENV_1", "value": "test_env_1"},
{
"name": "TEST_ENV_2",
"valueFrom": {"secretKeyRef": {"name": "my-secret", "key": "my-key"}},
},
{
"name": "TEST_ENV_3",
"valueFrom": {"configMapKeyRef": {"name": "my-config-map", "key": "my-key"}},
},
],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.template.spec.containers[0].env", docs[0]
)
assert {
"name": "TEST_ENV_2",
"valueFrom": {"secretKeyRef": {"name": "my-secret", "key": "my-key"}},
} in jmespath.search("spec.template.spec.containers[0].env", docs[0])
assert {
"name": "TEST_ENV_3",
"valueFrom": {"configMapKeyRef": {"name": "my-config-map", "key": "my-key"}},
} in jmespath.search("spec.template.spec.containers[0].env", docs[0])
def test_should_add_extraEnvs_to_wait_for_migration_container(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"waitForMigrations": {
"env": [{"name": "TEST_ENV_1", "value": "test_env_1"}],
},
}
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.template.spec.initContainers[0].env", docs[0]
)
def test_scheduler_name(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}, "schedulerName": "airflow-scheduler"},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert (
jmespath.search(
"spec.template.spec.schedulerName",
docs[0],
)
== "airflow-scheduler"
)
    def test_should_create_valid_affinity_tolerations_and_node_selector(self):
        """Component-level affinity, tolerations and nodeSelector render into the pod spec."""
        docs = render_chart(
            values={
                "dagProcessor": {
                    "enabled": True,
                    "affinity": {
                        "nodeAffinity": {
                            "requiredDuringSchedulingIgnoredDuringExecution": {
                                "nodeSelectorTerms": [
                                    {
                                        "matchExpressions": [
                                            {"key": "foo", "operator": "In", "values": ["true"]},
                                        ]
                                    }
                                ]
                            }
                        }
                    },
                    "tolerations": [
                        {"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
                    ],
                    "nodeSelector": {"diskType": "ssd"},
                },
            },
            show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
        )
        # Sanity-check the rendered object kind before digging into its spec.
        assert jmespath.search("kind", docs[0]) == "Deployment"
        # The node-affinity match expression configured above must survive rendering.
        assert (
            jmespath.search(
                "spec.template.spec.affinity.nodeAffinity."
                "requiredDuringSchedulingIgnoredDuringExecution."
                "nodeSelectorTerms[0]."
                "matchExpressions[0]."
                "key",
                docs[0],
            )
            == "foo"
        )
        assert (
            jmespath.search(
                "spec.template.spec.nodeSelector.diskType",
                docs[0],
            )
            == "ssd"
        )
        assert (
            jmespath.search(
                "spec.template.spec.tolerations[0].key",
                docs[0],
            )
            == "dynamic-pods"
        )
    def test_affinity_tolerations_topology_spread_constraints_and_node_selector_precedence(self):
        """When given both global and dag-processor affinity etc, the dag-processor values win.

        (The original docstring said "triggerer"; this suite exercises the
        dag-processor deployment, as the rendered template below shows.)
        """
        # Values configured at dagProcessor.* — these are expected to take effect.
        expected_affinity = {
            "nodeAffinity": {
                "requiredDuringSchedulingIgnoredDuringExecution": {
                    "nodeSelectorTerms": [
                        {
                            "matchExpressions": [
                                {"key": "foo", "operator": "In", "values": ["true"]},
                            ]
                        }
                    ]
                }
            }
        }
        expected_topology_spread_constraints = {
            "maxSkew": 1,
            "topologyKey": "foo",
            "whenUnsatisfiable": "ScheduleAnyway",
            "labelSelector": {"matchLabels": {"tier": "airflow"}},
        }
        docs = render_chart(
            values={
                "dagProcessor": {
                    "enabled": True,
                    "affinity": expected_affinity,
                    "tolerations": [
                        {"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
                    ],
                    "topologySpreadConstraints": [expected_topology_spread_constraints],
                    "nodeSelector": {"type": "ssd"},
                },
                # Chart-global settings deliberately use "not-me" markers so any
                # leakage into the rendered pod spec is easy to detect below.
                "affinity": {
                    "nodeAffinity": {
                        "preferredDuringSchedulingIgnoredDuringExecution": [
                            {
                                "weight": 1,
                                "preference": {
                                    "matchExpressions": [
                                        {"key": "not-me", "operator": "In", "values": ["true"]},
                                    ]
                                },
                            }
                        ]
                    }
                },
                "tolerations": [
                    {"key": "not-me", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
                ],
                "topologySpreadConstraints": [
                    {
                        "maxSkew": 1,
                        "topologyKey": "not-me",
                        "whenUnsatisfiable": "ScheduleAnyway",
                        "labelSelector": {"matchLabels": {"tier": "airflow"}},
                    }
                ],
                "nodeSelector": {"type": "not-me"},
            },
            show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
        )
        # Only the component-level values may appear; none of the "not-me" ones.
        assert expected_affinity == jmespath.search("spec.template.spec.affinity", docs[0])
        assert (
            jmespath.search(
                "spec.template.spec.nodeSelector.type",
                docs[0],
            )
            == "ssd"
        )
        tolerations = jmespath.search("spec.template.spec.tolerations", docs[0])
        # Exactly one toleration: the global one must not be merged in.
        assert len(tolerations) == 1
        assert tolerations[0]["key"] == "dynamic-pods"
        assert expected_topology_spread_constraints == jmespath.search(
            "spec.template.spec.topologySpreadConstraints[0]", docs[0]
        )
    def test_should_create_default_affinity(self):
        """The chart's default pod anti-affinity is rendered without any user values.

        NOTE(review): this renders the *scheduler* deployment even though the
        surrounding suite targets the dag processor — it looks copied from the
        scheduler tests; confirm whether dag-processor-deployment.yaml (and a
        ``component: dag-processor`` label) was intended.
        """
        docs = render_chart(show_only=["templates/scheduler/scheduler-deployment.yaml"])
        assert jmespath.search(
            "spec.template.spec.affinity.podAntiAffinity."
            "preferredDuringSchedulingIgnoredDuringExecution[0]."
            "podAffinityTerm.labelSelector.matchLabels",
            docs[0],
        ) == {"component": "scheduler"}
def test_livenessprobe_values_are_configurable(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"livenessProbe": {
"initialDelaySeconds": 111,
"timeoutSeconds": 222,
"failureThreshold": 333,
"periodSeconds": 444,
"command": ["sh", "-c", "echo", "wow such test"],
},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.initialDelaySeconds", docs[0])
== 111
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.timeoutSeconds", docs[0]) == 222
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.failureThreshold", docs[0]) == 333
)
assert jmespath.search("spec.template.spec.containers[0].livenessProbe.periodSeconds", docs[0]) == 444
assert jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]) == [
"sh",
"-c",
"echo",
"wow such test",
]
@pytest.mark.parametrize(
("airflow_version", "probe_command"),
[
("2.4.9", "airflow jobs check --hostname $(hostname)"),
("2.5.0", "airflow jobs check --local"),
("2.5.2", "airflow jobs check --local --job-type DagProcessorJob"),
],
)
def test_livenessprobe_command_depends_on_airflow_version(self, airflow_version, probe_command):
docs = render_chart(
values={"airflowVersion": f"{airflow_version}", "dagProcessor": {"enabled": True}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert (
probe_command
in jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0])[-1]
)
    @pytest.mark.parametrize(
        ("log_values", "expected_volume"),
        [
            # No persistence, default emptyDir.
            ({"persistence": {"enabled": False}}, {"emptyDir": {}}),
            # No persistence, emptyDir with an explicit size limit.
            (
                {"persistence": {"enabled": False}, "emptyDirConfig": {"sizeLimit": "10Gi"}},
                {"emptyDir": {"sizeLimit": "10Gi"}},
            ),
            # Persistence via the chart-managed PVC.
            (
                {"persistence": {"enabled": True}},
                {"persistentVolumeClaim": {"claimName": "release-name-logs"}},
            ),
            # Persistence via a user-provided, pre-existing claim.
            (
                {"persistence": {"enabled": True, "existingClaim": "test-claim"}},
                {"persistentVolumeClaim": {"claimName": "test-claim"}},
            ),
        ],
    )
    def test_logs_persistence_changes_volume(self, log_values, expected_volume):
        """The 'logs' volume switches between emptyDir and a PVC based on logs.persistence."""
        docs = render_chart(
            values={
                "logs": log_values,
                "dagProcessor": {"enabled": True},
            },
            show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
        )
        # The logs volume is rendered at index 1 of the pod's volumes list.
        assert jmespath.search("spec.template.spec.volumes[1]", docs[0]) == {
            "name": "logs",
            **expected_volume,
        }
def test_resources_are_configurable(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"resources": {
"limits": {"cpu": "200m", "memory": "128Mi"},
"requests": {"cpu": "300m", "memory": "169Mi"},
},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0]) == "128Mi"
assert jmespath.search("spec.template.spec.containers[0].resources.limits.cpu", docs[0]) == "200m"
assert (
jmespath.search("spec.template.spec.containers[0].resources.requests.memory", docs[0]) == "169Mi"
)
assert jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0]) == "300m"
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.limits.memory", docs[0])
== "128Mi"
)
assert jmespath.search("spec.template.spec.initContainers[0].resources.limits.cpu", docs[0]) == "200m"
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.requests.memory", docs[0])
== "169Mi"
)
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.requests.cpu", docs[0]) == "300m"
)
def test_resources_are_not_added_by_default(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
@pytest.mark.parametrize(
("strategy", "expected_strategy"),
[
(None, None),
(
{"rollingUpdate": {"maxSurge": "100%", "maxUnavailable": "50%"}},
{"rollingUpdate": {"maxSurge": "100%", "maxUnavailable": "50%"}},
),
],
)
def test_strategy(self, strategy, expected_strategy):
"""Strategy should be used when we aren't using both LocalExecutor and workers.persistence."""
docs = render_chart(
values={
"dagProcessor": {"enabled": True, "strategy": strategy},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert expected_strategy == jmespath.search("spec.strategy", docs[0])
def test_default_command_and_args(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) is None
assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == [
"bash",
"-c",
"exec airflow dag-processor",
]
@pytest.mark.parametrize(
("revision_history_limit", "global_revision_history_limit"),
[(8, 10), (10, 8), (8, None), (None, 10), (None, None)],
)
def test_revision_history_limit(self, revision_history_limit, global_revision_history_limit):
values = {
"dagProcessor": {
"enabled": True,
}
}
if revision_history_limit:
values["dagProcessor"]["revisionHistoryLimit"] = revision_history_limit
if global_revision_history_limit:
values["revisionHistoryLimit"] = global_revision_history_limit
docs = render_chart(
values=values,
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
expected_result = revision_history_limit or global_revision_history_limit
assert jmespath.search("spec.revisionHistoryLimit", docs[0]) == expected_result
@pytest.mark.parametrize("command", [None, ["custom", "command"]])
@pytest.mark.parametrize("args", [None, ["custom", "args"]])
def test_command_and_args_overrides(self, command, args):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"command": command,
"args": args,
}
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert command == jmespath.search("spec.template.spec.containers[0].command", docs[0])
assert args == jmespath.search("spec.template.spec.containers[0].args", docs[0])
def test_command_and_args_overrides_are_templated(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"command": ["{{ .Release.Name }}"],
"args": ["{{ .Release.Service }}"],
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) == ["release-name"]
assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == ["Helm"]
def test_dags_volume_mount_with_persistence_true(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}, "dags": {"gitSync": {"enabled": True}}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert "dags" in [
vm["name"] for vm in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
]
assert "dags" in [vm["name"] for vm in jmespath.search("spec.template.spec.volumes", docs[0])]
def test_dags_gitsync_sidecar_and_init_container(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}, "dags": {"gitSync": {"enabled": True}}},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert "git-sync" in [c["name"] for c in jmespath.search("spec.template.spec.containers", docs[0])]
assert "git-sync-init" in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", docs[0])
]
def test_dags_gitsync_with_persistence_no_sidecar_or_init_container(self):
docs = render_chart(
values={
"dagProcessor": {"enabled": True},
"dags": {"gitSync": {"enabled": True}, "persistence": {"enabled": True}},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
# No gitsync sidecar or init container
assert "git-sync" not in [
c["name"] for c in jmespath.search("spec.template.spec.containers", docs[0])
]
assert "git-sync-init" not in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", docs[0])
]
def test_no_airflow_local_settings(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}, "airflowLocalSettings": None},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
volume_mounts = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
volume_mounts_init = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts_init)
def test_airflow_local_settings(self):
docs = render_chart(
values={"dagProcessor": {"enabled": True}, "airflowLocalSettings": "# Well hello!"},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
volume_mount = {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
}
assert volume_mount in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert volume_mount in jmespath.search("spec.template.spec.initContainers[0].volumeMounts", docs[0])
def test_should_add_component_specific_annotations(self):
docs = render_chart(
values={
"dagProcessor": {
"enabled": True,
"annotations": {"test_annotation": "test_annotation_value"},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert "annotations" in jmespath.search("metadata", docs[0])
assert jmespath.search("metadata.annotations", docs[0])["test_annotation"] == "test_annotation_value"
    @pytest.mark.parametrize(
        ("webserver_config", "should_add_volume"),
        [
            ("CSRF_ENABLED = True", True),
            (None, False),
        ],
    )
    def test_should_add_webserver_config_volume_and_volume_mount_when_exists(
        self, webserver_config, should_add_volume
    ):
        """webserver_config.py volume/mount appear iff webserver.webserverConfig is set."""
        expected_volume = {
            "name": "webserver-config",
            "configMap": {"name": "release-name-webserver-config"},
        }
        expected_volume_mount = {
            "name": "webserver-config",
            "mountPath": "/opt/airflow/webserver_config.py",
            "subPath": "webserver_config.py",
            "readOnly": True,
        }
        docs = render_chart(
            values={
                "dagProcessor": {"enabled": True},
                "webserver": {"webserverConfig": webserver_config},
            },
            show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
        )
        created_volumes = jmespath.search("spec.template.spec.volumes", docs[0])
        # NOTE(review): containers[1] is presumably the sidecar that needs the
        # webserver config — confirm which container this index refers to.
        created_volume_mounts = jmespath.search("spec.template.spec.containers[1].volumeMounts", docs[0])
        if should_add_volume:
            assert expected_volume in created_volumes
            assert expected_volume_mount in created_volume_mounts
        else:
            assert expected_volume not in created_volumes
            assert expected_volume_mount not in created_volume_mounts
def test_validate_if_ssh_params_are_added_with_git_ssh_key(self):
docs = render_chart(
values={
"dagProcessor": {"enabled": True},
"dags": {
"gitSync": {
"enabled": True,
"sshKey": "my-ssh-key",
},
"persistence": {"enabled": False},
},
},
show_only=["templates/dag-processor/dag-processor-deployment.yaml"],
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GITSYNC_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GITSYNC_SSH", "value": "true"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "GITSYNC_SSH_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "release-name-ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.template.spec.volumes", docs[0])
|
TestDagProcessor
|
python
|
ray-project__ray
|
rllib/env/tests/test_multi_agent_env_runner.py
|
{
"start": 1224,
"end": 7429
}
|
class ____(unittest.TestCase):
    """Tests for ``MultiAgentEnvRunner`` sampling and callback behavior."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared Ray runtime for the whole suite; torn down in tearDownClass.
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        # Fix: the class argument was previously named ``self`` even though this
        # is a classmethod; renamed to ``cls`` per convention (binding unchanged).
        ray.shutdown()

    def test_sample_timesteps(self):
        """Sampling by env timesteps returns exactly the requested step count."""
        # Build a multi agent config.
        config = self._build_config()
        # Create a `MultiAgentEnvRunner` instance.
        env_runner = MultiAgentEnvRunner(config=config)
        # Now sample 10 timesteps.
        episodes = env_runner.sample(num_timesteps=10)
        # Assert that we have 10 timesteps sampled.
        check(sum(len(episode) for episode in episodes), 10)
        # Now sample 200 timesteps.
        episodes = env_runner.sample(num_timesteps=200)
        # Ensure that at least two episodes are returned.
        # Note, the test environment ends episodes well within 200 timesteps.
        self.assertGreaterEqual(len(episodes), 2)
        # Also ensure that the first episode ended (``is_terminated`` is what
        # the runner sets here, even though the env may cut episodes off).
        check(episodes[0].is_terminated, True)
        # Assert that indeed 200 timesteps were sampled.
        check(sum(len(e) for e in episodes), 200)
        # Assert that the timesteps however in the episodes are 210.
        # Note, the first episode started at `t_started=10`.
        check(sum(e.env_t for e in episodes), 210)
        # Assert that all agents extra model outputs are recorded.
        for agent_eps in episodes[0].agent_episodes.values():
            check("action_logp" in agent_eps.extra_model_outputs, True)
            check(
                len(agent_eps.actions),
                len(agent_eps.extra_model_outputs["action_logp"]),
            )
            check(
                len(agent_eps.actions),
                len(agent_eps.extra_model_outputs["action_dist_inputs"]),
            )

    def test_sample_episodes(self):
        """Sampling by episodes returns complete episodes that start at t=0."""
        # Build a multi agent config.
        config = self._build_config()
        # Create a `MultiAgentEnvRunner` instance.
        env_runner = MultiAgentEnvRunner(config=config)
        # Now sample 5 episodes.
        episodes = env_runner.sample(num_episodes=5)
        # Assert that we have 5 episodes sampled.
        check(len(episodes), 5)
        # Also assert that every episode is complete.
        check(all(eps.is_terminated for eps in episodes), True)
        # Assert that all agents have the extra model outputs.
        for eps in episodes:
            for agent_eps in eps.agent_episodes.values():
                check("action_logp" in agent_eps.extra_model_outputs, True)
                check(
                    len(agent_eps.actions),
                    len(agent_eps.extra_model_outputs["action_logp"]),
                )
                check(
                    len(agent_eps.actions),
                    len(agent_eps.extra_model_outputs["action_dist_inputs"]),
                )
        # Now sample 10 timesteps and then 1 episode.
        episodes = env_runner.sample(num_timesteps=10)
        episodes += env_runner.sample(num_episodes=1)
        # Ensure that the episodes both start at zero.
        for eps in episodes:
            check(eps.env_t_started, 0)
        # Now sample 1 episode and then 10 timesteps.
        episodes = env_runner.sample(num_episodes=1)
        episodes += env_runner.sample(num_timesteps=10)
        # Assert that in both cases we start at zero.
        for eps in episodes:
            check(eps.env_t_started, 0)

    def test_counting_by_agent_steps(self):
        """Tests whether counting by agent_steps works."""
        # Build a multi agent config (4 agents sharing a single policy).
        config = self._build_config(num_agents=4, num_policies=1)
        config.multi_agent(count_steps_by="agent_steps")
        config.env_runners(
            rollout_fragment_length=20,
            num_envs_per_env_runner=4,
        )
        # Create a `MultiAgentEnvRunner` instance.
        env_runner = MultiAgentEnvRunner(config=config)
        episodes = env_runner.sample()
        # One episode per (vectorized) sub-environment, each with 20 agent steps.
        assert len(episodes) == 4
        assert all(e.agent_steps() == 20 for e in episodes)

    def _build_config(self, num_agents=2, num_policies=2):
        """Build a PPO config for `num_agents` agents mapped onto `num_policies` policies.

        Either every agent gets its own policy (num_agents == num_policies) or
        all agents share a single policy "p0" (num_policies == 1).
        """
        assert num_policies == 1 or num_agents == num_policies
        config = (
            PPOConfig()
            .environment(
                MultiAgentCartPole,
                env_config={"num_agents": num_agents},
            )
            .multi_agent(
                policies={f"p{i}" for i in range(num_policies)},
                policy_mapping_fn=(
                    lambda aid, *args, **kwargs: (
                        f"p{aid}" if num_agents == num_policies else "p0"
                    )
                ),
            )
        )
        return config

    def test_on_episode_end_callback(self):
        """Check that callback only happens once for each completed episode.

        Related to https://github.com/ray-project/ray/issues/55452
        """
        config = (
            PPOConfig()
            .environment(
                MultiAgentCartPole,
                env_config={"num_agents": 1},
            )
            .multi_agent(
                policies={"p0"}, policy_mapping_fn=(lambda aid, *args, **kwargs: "p0")
            )
            .env_runners(
                env_to_module_connector=EpisodeTracker,
            )
        )
        # Cover single-env, fully vectorized, and more-episodes-than-envs cases.
        for num_envs, num_episodes in [(1, 1), (4, 4), (1, 4)]:
            config.env_runners(num_envs_per_env_runner=num_envs)
            env_runner = MultiAgentEnvRunner(config=config)
            self.assertTrue(
                isinstance(env_runner._env_to_module.connectors[0], EpisodeTracker)
            )
            self.assertEqual(
                env_runner._env_to_module.connectors[0].episode_end_counter, 0
            )
            sampled_episodes = env_runner.sample(
                num_episodes=num_episodes, random_actions=True
            )
            self.assertEqual(len(sampled_episodes), num_episodes)
            # The counter must match the number of completed episodes exactly —
            # i.e. the end-of-episode hook fired once per episode, no more.
            self.assertEqual(
                env_runner._env_to_module.connectors[0].episode_end_counter,
                num_episodes,
            )
# Allow running this test module directly: delegate to pytest and propagate
# its exit code to the shell.
if __name__ == "__main__":
    import sys
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
TestMultiAgentEnvRunner
|
python
|
scipy__scipy
|
scipy/stats/_probability_distribution.py
|
{
"start": 173,
"end": 69975
}
|
class ____(ABC):
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
    @abstractmethod
    def support(self):
        r"""Support of the random variable

        The support of a random variable is the set of all possible outcomes;
        i.e., the subset of the domain of argument :math:`x` for which
        the probability density function :math:`f(x)` is nonzero.

        This function returns lower and upper bounds of the support.

        Returns
        -------
        out : tuple of Array
            The lower and upper bounds of the support.

        See Also
        --------
        pdf

        References
        ----------
        .. [1] Support (mathematics), *Wikipedia*,
               https://en.wikipedia.org/wiki/Support_(mathematics)

        Notes
        -----
        Suppose a continuous probability distribution has support ``(l, r)``.
        The following table summarizes the value returned by several
        methods when the argument is outside the support.

        +----------------+---------------------+---------------------+
        | Method         | Value for ``x < l`` | Value for ``x > r`` |
        +================+=====================+=====================+
        | ``pdf(x)``     | 0                   | 0                   |
        +----------------+---------------------+---------------------+
        | ``logpdf(x)``  | -inf                | -inf                |
        +----------------+---------------------+---------------------+
        | ``cdf(x)``     | 0                   | 1                   |
        +----------------+---------------------+---------------------+
        | ``logcdf(x)``  | -inf                | 0                   |
        +----------------+---------------------+---------------------+
        | ``ccdf(x)``    | 1                   | 0                   |
        +----------------+---------------------+---------------------+
        | ``logccdf(x)`` | 0                   | -inf                |
        +----------------+---------------------+---------------------+

        For discrete distributions, the same table is applicable with
        ``pmf`` and ``logpmf`` substituted for ``pdf`` and ``logpdf``.
        For the ``cdf`` and related methods of continuous distributions, the
        inequality need not be strict; i.e. the tabulated value is returned
        when the method is evaluated *at* the corresponding boundary.

        The following table summarizes the value returned by the inverse
        methods for arguments ``0`` and ``1``, whether the distribution
        is continuous or discrete.

        +--------------+-----------+-----------+
        | Method       | ``x = 0`` | ``x = 1`` |
        +==============+===========+===========+
        | ``icdf(x)``  | ``l``     | ``r``     |
        +--------------+-----------+-----------+
        | ``iccdf(x)`` | ``r``     | ``l``     |
        +--------------+-----------+-----------+

        .. Fixed: the second row was previously labeled ``icdf`` again; it
           describes the inverse *complementary* CDF, ``iccdf``.

        For the inverse log-functions, the same values are returned
        for ``x = log(0)`` and ``x = log(1)``. All inverse functions return
        ``nan`` when evaluated at an argument outside the domain ``0`` to ``1``.

        Examples
        --------
        Instantiate a distribution with the desired parameters:

        >>> from scipy import stats
        >>> X = stats.Uniform(a=-0.5, b=0.5)

        Retrieve the support of the distribution:

        >>> X.support()
        (-0.5, 0.5)

        For a distribution with infinite support,

        >>> X = stats.Normal()
        >>> X.support()
        (-inf, inf)

        Due to underflow, the numerical value returned by the PDF may be zero
        even for arguments within the support, even if the true value is
        nonzero. In such cases, the log-PDF may be useful.

        >>> X.pdf([-100., 100.])
        array([0., 0.])
        >>> X.logpdf([-100., 100.])
        array([-5000.91893853, -5000.91893853])

        Use cases for the log-CDF and related methods are analogous.

        """
        raise NotImplementedError()
    @abstractmethod
    def sample(self, shape, *, method, rng):
        r"""Random sample from the distribution.

        Parameters
        ----------
        shape : tuple of ints, default: ()
            The shape of the sample to draw. If the parameters of the distribution
            underlying the random variable are arrays of shape ``param_shape``,
            the output array will be of shape ``shape + param_shape``.
        method : {None, 'formula', 'inverse_transform'}
            The strategy used to produce the sample. By default (``None``),
            the infrastructure chooses between the following options,
            listed in order of precedence.

            - ``'formula'``: an implementation specific to the distribution
            - ``'inverse_transform'``: generate a uniformly distributed sample and
              return the inverse CDF at these arguments.

            Not all `method` options are available for all distributions.
            If the selected `method` is not available, a ``NotImplementedError``
            will be raised.
        rng : `numpy.random.Generator` or `scipy.stats.QMCEngine`, optional
            Pseudo- or quasi-random number generator state. When `rng` is None,
            a new `numpy.random.Generator` is created using entropy from the
            operating system. Types other than `numpy.random.Generator` and
            `scipy.stats.QMCEngine` are passed to `numpy.random.default_rng`
            to instantiate a ``Generator``.

            If `rng` is an instance of `scipy.stats.QMCEngine` configured to use
            scrambling and `shape` is not empty, then each slice along the zeroth
            axis of the result is a "quasi-independent", low-discrepancy sequence;
            that is, they are distinct sequences that can be treated as statistically
            independent for most practical purposes. Separate calls to `sample`
            produce new quasi-independent, low-discrepancy sequences.

        Returns
        -------
        out : Array
            A sample of shape ``shape + param_shape`` (see `shape` above).

        References
        ----------
        .. [1] Sampling (statistics), *Wikipedia*,
               https://en.wikipedia.org/wiki/Sampling_(statistics)

        Examples
        --------
        Instantiate a distribution with the desired parameters:

        >>> import numpy as np
        >>> from scipy import stats
        >>> X = stats.Uniform(a=0., b=1.)

        Generate a pseudorandom sample:

        >>> x = X.sample((1000, 1))
        >>> octiles = (np.arange(8) + 1) / 8
        >>> np.count_nonzero(x <= octiles, axis=0)
        array([ 148,  263,  387,  516,  636,  751,  865, 1000])  # may vary

        >>> X = stats.Uniform(a=np.zeros((3, 1)), b=np.ones(2))
        >>> X.a.shape
        (3, 2)
        >>> x = X.sample(shape=(5, 4))
        >>> x.shape
        (5, 4, 3, 2)

        """
        raise NotImplementedError()
    @abstractmethod
    def moment(self, order, kind, *, method):
        r"""Raw, central, or standard moment of positive integer order.

        In terms of probability density function :math:`f(x)` and support
        :math:`\chi`, the "raw" moment (about the origin) of order :math:`n` of
        a continuous random variable :math:`X` is:

        .. math::

            \mu'_n(X) = \int_{\chi} x^n f(x) dx

        The "central" moment is the raw moment taken about the mean,
        :math:`\mu = \mu'_1`:

        .. math::

            \mu_n(X) = \int_{\chi} (x - \mu) ^n f(x) dx

        The "standardized" moment is the central moment normalized by the
        :math:`n^\text{th}` power of the standard deviation
        :math:`\sigma = \sqrt{\mu_2}` to produce a scale invariant quantity:

        .. math::

            \tilde{\mu}_n(X) = \frac{\mu_n(X)}
                                    {\sigma^n}

        The definitions for discrete random variables are analogous, with
        sums over the support replacing the integrals.

        Parameters
        ----------
        order : int
            The integer order of the moment; i.e. :math:`n` in the formulae above.
        kind : {'raw', 'central', 'standardized'}
            Whether to return the raw (default), central, or standardized moment
            defined above.
        method : {None, 'formula', 'general', 'transform', 'normalize', 'quadrature', 'cache'}
            The strategy used to evaluate the moment. By default (``None``),
            the infrastructure chooses between the following options,
            listed in order of precedence.

            - ``'cache'``: use the value of the moment most recently calculated
              via another method
            - ``'formula'``: use a formula for the moment itself
            - ``'general'``: use a general result that is true for all distributions
              with finite moments; for instance, the zeroth raw moment is
              identically 1
            - ``'transform'``: transform a raw moment to a central moment or
              vice versa (see Notes)
            - ``'normalize'``: normalize a central moment to get a standardized
              moment, or vice versa
            - ``'quadrature'``: numerically integrate (or, in the discrete case, sum)
              according to the definition

            Not all `method` options are available for all orders, kinds, and
            distributions. If the selected `method` is not available, a
            ``NotImplementedError`` will be raised.

        Returns
        -------
        out : array
            The moment of the random variable of the specified order and kind.

        See Also
        --------
        pdf
        mean
        variance
        standard_deviation
        skewness
        kurtosis

        Notes
        -----
        Not all distributions have finite moments of all orders; moments of some
        orders may be undefined or infinite. If a formula for the moment is not
        specifically implemented for the chosen distribution, SciPy will attempt
        to compute the moment via a generic method, which may yield a finite
        result where none exists. This is not a critical bug, but an opportunity
        for an enhancement.

        The definition of a raw moment in the summary is specific to the raw moment
        about the origin. The raw moment about any point :math:`a` is:

        .. math::

            E[(X-a)^n] = \int_{\chi} (x-a)^n f(x) dx

        In this notation, a raw moment about the origin is :math:`\mu'_n = E[x^n]`,
        and a central moment is :math:`\mu_n = E[(x-\mu)^n]`, where :math:`\mu`
        is the first raw moment; i.e. the mean.

        The ``'transform'`` method takes advantage of the following relationships
        between moments taken about different points :math:`a` and :math:`b`.

        .. math::

            E[(X-b)^n] = \sum_{i=0}^n E[(X-a)^i] {n \choose i} (a - b)^{n-i}

        For instance, to transform the raw moment to the central moment, we let
        :math:`b = \mu` and :math:`a = 0`.

        The distribution infrastructure provides flexibility for distribution
        authors to implement separate formulas for raw moments, central moments,
        and standardized moments of any order. By default, the moment of the
        desired order and kind is evaluated from the formula if such a formula
        is available; if not, the infrastructure uses any formulas that are
        available rather than resorting directly to numerical integration.
        For instance, if formulas for the first three raw moments are
        available and the third standardized moment is desired, the
        infrastructure will evaluate the raw moments and perform the transforms
        and standardization required. The decision tree is somewhat complex,
        but the strategy for obtaining a moment of a given order and kind
        (possibly as an intermediate step due to the recursive nature of the
        transform formula above) roughly follows this order of priority:

        #. Use cache (if order of same moment and kind has been calculated)
        #. Use formula (if available)
        #. Transform between raw and central moment and/or normalize to convert
           between central and standardized moments (if efficient)
        #. Use a generic result true for most distributions (if available)
        #. Use quadrature

        References
        ----------
        .. [1] Moment, *Wikipedia*,
               https://en.wikipedia.org/wiki/Moment_(mathematics)

        Examples
        --------
        Instantiate a distribution with the desired parameters:

        >>> from scipy import stats
        >>> X = stats.Normal(mu=1., sigma=2.)

        Evaluate the first raw moment:

        >>> X.moment(order=1, kind='raw')
        1.0
        >>> X.moment(order=1, kind='raw') == X.mean() == X.mu
        True

        Evaluate the second central moment:

        >>> X.moment(order=2, kind='central')
        4.0
        >>> X.moment(order=2, kind='central') == X.variance() == X.sigma**2
        True

        Evaluate the fourth standardized moment:

        >>> X.moment(order=4, kind='standardized')
        3.0
        >>> X.moment(order=4, kind='standardized') == X.kurtosis(convention='non-excess')
        True

        """ # noqa:E501
        raise NotImplementedError()
    @abstractmethod
    def mean(self, *, method):
        r"""Mean (raw first moment about the origin)

        Parameters
        ----------
        method : {None, 'formula', 'transform', 'quadrature', 'cache'}
            Method used to calculate the raw first moment. Not
            all methods are available for all distributions. See
            `moment` for details.

        See Also
        --------
        moment
        median
        mode

        References
        ----------
        .. [1] Mean, *Wikipedia*,
               https://en.wikipedia.org/wiki/Mean#Mean_of_a_probability_distribution

        Examples
        --------
        Instantiate a distribution with the desired parameters:

        >>> from scipy import stats
        >>> X = stats.Normal(mu=1., sigma=2.)

        Evaluate the mean:

        >>> X.mean()
        1.0
        >>> X.mean() == X.moment(order=1, kind='raw') == X.mu
        True

        """
        raise NotImplementedError()
@abstractmethod
def median(self, *, method):
    r"""Median (50th percentile)

    If a continuous random variable :math:`X` has probability :math:`0.5` of
    taking on a value less than :math:`m`, then :math:`m` is the median.
    More generally, a median is a value :math:`m` for which:

    .. math::

        P(X ≤ m) ≥ 0.5 ≥ P(X > m)

    For discrete random variables, the median may not be unique, in which
    case the smallest value satisfying the definition is reported.

    Parameters
    ----------
    method : {None, 'formula', 'icdf'}
        The strategy used to evaluate the median.
        By default (``None``), the infrastructure chooses between the
        following options, listed in order of precedence.

        - ``'formula'``: use a formula for the median
        - ``'icdf'``: evaluate the inverse CDF of 0.5

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The median

    See Also
    --------
    mean
    mode
    icdf

    References
    ----------
    .. [1] Median, *Wikipedia*,
           https://en.wikipedia.org/wiki/Median#Probability_distributions

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Uniform(a=0., b=10.)

    Compute the median:

    >>> X.median()
    np.float64(5.0)
    >>> X.median() == X.icdf(0.5) == X.iccdf(0.5)
    True

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def mode(self, *, method):
    r"""Mode (most likely value)

    Informally, the mode is a value that a random variable has the highest
    probability (density) of assuming. That is, the mode is the element of
    the support :math:`\chi` that maximizes the probability density (or mass,
    for discrete random variables) function :math:`f(x)`:

    .. math::

        \text{mode} = \arg\max_{x \in \chi} f(x)

    Parameters
    ----------
    method : {None, 'formula', 'optimization'}
        The strategy used to evaluate the mode.
        By default (``None``), the infrastructure chooses between the
        following options, listed in order of precedence.

        - ``'formula'``: use a formula for the mode
        - ``'optimization'``: numerically maximize the PDF/PMF

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The mode

    See Also
    --------
    mean
    median
    pdf

    Notes
    -----
    For some distributions

    #. the mode is not unique (e.g. the uniform distribution);
    #. the PDF has one or more singularities, and it is debatable whether
       a singularity is considered to be in the domain and called the mode
       (e.g. the gamma distribution with shape parameter less than 1); and/or
    #. the probability density function may have one or more local maxima
       that are not a global maximum (e.g. mixture distributions).

    In such cases, `mode` will

    #. return a single value,
    #. consider the mode to occur at a singularity, and/or
    #. return a local maximum which may or may not be a global maximum.

    If a formula for the mode is not specifically implemented for the
    chosen distribution, SciPy will attempt to compute the mode
    numerically, which may not meet the user's preferred definition of a
    mode. In such cases, the user is encouraged to subclass the
    distribution and override ``mode``.

    References
    ----------
    .. [1] Mode (statistics), *Wikipedia*,
           https://en.wikipedia.org/wiki/Mode_(statistics)

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Normal(mu=1., sigma=2.)

    Evaluate the mode:

    >>> X.mode()
    1.0

    If the mode is not uniquely defined, ``mode`` nonetheless returns a
    single value.

    >>> X = stats.Uniform(a=0., b=1.)
    >>> X.mode()
    0.5

    If this choice does not satisfy your requirements, subclass the
    distribution and override ``mode``:

    >>> class BetterUniform(stats.Uniform):
    ...     def mode(self):
    ...         return self.b
    >>> X = BetterUniform(a=0., b=1.)
    >>> X.mode()
    1.0

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def variance(self, *, method):
    r"""Variance (central second moment)

    Parameters
    ----------
    method : {None, 'formula', 'transform', 'normalize', 'quadrature', 'cache'}
        Method used to calculate the central second moment. Not
        all methods are available for all distributions. See
        `moment` for details.

    Returns
    -------
    out : array
        The variance of the distribution.

    See Also
    --------
    moment
    standard_deviation
    mean

    References
    ----------
    .. [1] Variance, *Wikipedia*,
           https://en.wikipedia.org/wiki/Variance#Absolutely_continuous_random_variable

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Normal(mu=1., sigma=2.)

    Evaluate the variance:

    >>> X.variance()
    4.0
    >>> X.variance() == X.moment(order=2, kind='central') == X.sigma**2
    True

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def standard_deviation(self, *, method):
    r"""Standard deviation (square root of the second central moment)

    Parameters
    ----------
    method : {None, 'formula', 'transform', 'normalize', 'quadrature', 'cache'}
        Method used to calculate the central second moment. Not
        all methods are available for all distributions. See
        `moment` for details.

    Returns
    -------
    out : array
        The standard deviation of the distribution.

    See Also
    --------
    variance
    mean
    moment

    References
    ----------
    .. [1] Standard deviation, *Wikipedia*,
           https://en.wikipedia.org/wiki/Standard_deviation#Definition_of_population_values

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Normal(mu=1., sigma=2.)

    Evaluate the standard deviation:

    >>> X.standard_deviation()
    2.0
    >>> X.standard_deviation() == X.moment(order=2, kind='central')**0.5 == X.sigma
    True

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def skewness(self, *, method):
    r"""Skewness (standardized third moment)

    Parameters
    ----------
    method : {None, 'formula', 'general', 'transform', 'normalize', 'cache'}
        Method used to calculate the standardized third moment. Not
        all methods are available for all distributions. See
        `moment` for details.

    Returns
    -------
    out : array
        The skewness of the distribution.

    See Also
    --------
    moment
    mean
    variance

    References
    ----------
    .. [1] Skewness, *Wikipedia*,
           https://en.wikipedia.org/wiki/Skewness

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Normal(mu=1., sigma=2.)

    Evaluate the skewness:

    >>> X.skewness()
    0.0
    >>> X.skewness() == X.moment(order=3, kind='standardized')
    True

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def kurtosis(self, *, method, convention='non-excess'):
    r"""Kurtosis (standardized fourth moment)

    By default, this is the standardized fourth moment, also known as the
    "non-excess" or "Pearson" kurtosis (e.g. the kurtosis of the normal
    distribution is 3). The "excess" or "Fisher" kurtosis (the standardized
    fourth moment minus 3) is available via the `convention` parameter.

    Parameters
    ----------
    method : {None, 'formula', 'general', 'transform', 'normalize', 'cache'}
        Method used to calculate the standardized fourth moment. Not
        all methods are available for all distributions. See
        `moment` for details.
    convention : {'non-excess', 'excess'}
        Two distinct conventions are available:

        - ``'non-excess'``: the standardized fourth moment (Pearson's kurtosis)
        - ``'excess'``: the standardized fourth moment minus 3 (Fisher's kurtosis)

        The default is ``'non-excess'``.

    Returns
    -------
    out : array
        The kurtosis of the distribution.

    See Also
    --------
    moment
    mean
    variance

    References
    ----------
    .. [1] Kurtosis, *Wikipedia*,
           https://en.wikipedia.org/wiki/Kurtosis

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Normal(mu=1., sigma=2.)

    Evaluate the kurtosis:

    >>> X.kurtosis()
    3.0
    >>> (X.kurtosis()
    ...  == X.kurtosis(convention='excess') + 3.
    ...  == X.moment(order=4, kind='standardized'))
    True

    """
    # The docstring (and the example above) documents `convention`, so it is
    # part of the abstract interface; it carries a default and is
    # keyword-only, keeping existing overrides/callers backward-compatible.
    raise NotImplementedError()
@abstractmethod
def pdf(self, x, /, *, method):
    r"""Probability density function

    The probability density function ("PDF"), denoted :math:`f(x)`, is the
    probability *per unit length* that the random variable will assume the
    value :math:`x`. Mathematically, it can be defined as the derivative
    of the cumulative distribution function :math:`F(x)`:

    .. math::

        f(x) = \frac{d}{dx} F(x)

    `pdf` accepts `x` for :math:`x`.

    Parameters
    ----------
    x : array_like
        The argument of the PDF.
    method : {None, 'formula', 'logexp'}
        The strategy used to evaluate the PDF. By default (``None``), the
        infrastructure chooses between the following options, listed in
        order of precedence.

        - ``'formula'``: use a formula for the PDF itself
        - ``'logexp'``: evaluate the log-PDF and exponentiate

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The PDF evaluated at the argument `x`.

    See Also
    --------
    cdf
    logpdf

    Notes
    -----
    Suppose a continuous probability distribution has support :math:`[l, r]`.
    By definition of the support, the PDF evaluates to its minimum value
    of :math:`0` outside the support; i.e. for :math:`x < l` or
    :math:`x > r`. The maximum of the PDF may be less than or greater than
    :math:`1`; since the value is a probability *density*, only its integral
    over the support must equal :math:`1`.

    For discrete distributions, `pdf` returns ``inf`` at supported points
    and ``0`` elsewhere.

    References
    ----------
    .. [1] Probability density function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Probability_density_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Uniform(a=-1., b=1.)

    Evaluate the PDF at the desired argument:

    >>> X.pdf(0.25)
    0.5

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def logpdf(self, x, /, *, method):
    r"""Log of the probability density function

    The probability density function ("PDF"), denoted :math:`f(x)`, is the
    probability *per unit length* that the random variable will assume the
    value :math:`x`. Mathematically, it can be defined as the derivative
    of the cumulative distribution function :math:`F(x)`:

    .. math::

        f(x) = \frac{d}{dx} F(x)

    `logpdf` computes the logarithm of the probability density function
    ("log-PDF"), :math:`\log(f(x))`, but it may be numerically favorable
    compared to the naive implementation (computing :math:`f(x)` and
    taking the logarithm).

    `logpdf` accepts `x` for :math:`x`.

    Parameters
    ----------
    x : array_like
        The argument of the log-PDF.
    method : {None, 'formula', 'logexp'}
        The strategy used to evaluate the log-PDF. By default (``None``), the
        infrastructure chooses between the following options, listed in order
        of precedence.

        - ``'formula'``: use a formula for the log-PDF itself
        - ``'logexp'``: evaluate the PDF and take its logarithm

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The log-PDF evaluated at the argument `x`.

    See Also
    --------
    pdf
    logcdf

    Notes
    -----
    Suppose a continuous probability distribution has support :math:`[l, r]`.
    By definition of the support, the log-PDF evaluates to its minimum value
    of :math:`-\infty` (i.e. :math:`\log(0)`) outside the support; i.e. for
    :math:`x < l` or :math:`x > r`. The maximum of the log-PDF may be less
    than or greater than :math:`\log(1) = 0` because the maximum of the PDF
    can be any positive real.

    For distributions with infinite support, it is common for `pdf` to return
    a value of ``0`` when the argument is theoretically within the support;
    this can occur because the true value of the PDF is too small to be
    represented by the chosen dtype. The log-PDF, however, will often be finite
    (not ``-inf``) over a much larger domain. Consequently, it may be preferred
    to work with the logarithms of probabilities and probability densities to
    avoid underflow.

    For discrete distributions, `logpdf` returns ``inf`` at supported points and
    ``-inf`` (``log(0)``) elsewhere.

    References
    ----------
    .. [1] Probability density function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Probability_density_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Uniform(a=-1.0, b=1.0)

    Evaluate the log-PDF at the desired argument:

    >>> X.logpdf(0.5)
    -0.6931471805599453
    >>> np.allclose(X.logpdf(0.5), np.log(X.pdf(0.5)))
    True

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
def pmf(self, x, /, *, method=None):
    r"""Probability mass function

    The probability mass function ("PMF"), denoted :math:`f(x)`, is the
    probability that the random variable :math:`X` will assume the value :math:`x`.

    .. math::

        f(x) = P(X = x)

    `pmf` accepts `x` for :math:`x`.

    Parameters
    ----------
    x : array_like
        The argument of the PMF.
    method : {None, 'formula', 'logexp'}
        The strategy used to evaluate the PMF. By default (``None``), the
        infrastructure chooses between the following options, listed in
        order of precedence.

        - ``'formula'``: use a formula for the PMF itself
        - ``'logexp'``: evaluate the log-PMF and exponentiate

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The PMF evaluated at the argument `x`.

    See Also
    --------
    cdf
    logpmf

    Notes
    -----
    Suppose a discrete probability distribution has support over the integers
    :math:`\{l, l+1, ..., r-1, r\}`.
    By definition of the support, the PMF evaluates to its minimum value
    of :math:`0` for non-integral :math:`x` and for :math:`x` outside the support;
    i.e. for :math:`x < l` or :math:`x > r`.

    For continuous distributions, `pmf` returns ``0`` at all real arguments.

    References
    ----------
    .. [1] Probability mass function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Probability_mass_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Binomial(n=10, p=0.5)

    Evaluate the PMF at the desired argument:

    >>> X.pmf(5)
    np.float64(0.24609375)

    """
    # Default (non-abstract) stub: discrete subclasses must override this.
    raise NotImplementedError()
def logpmf(self, x, /, *, method=None):
    r"""Log of the probability mass function

    The probability mass function ("PMF"), denoted :math:`f(x)`, is the
    probability that the random variable :math:`X` will assume the value :math:`x`.

    .. math::

        f(x) = P(X = x)

    `logpmf` computes the logarithm of the probability mass function
    ("log-PMF"), :math:`\log(f(x))`, but it may be numerically favorable
    compared to the naive implementation (computing :math:`f(x)` and
    taking the logarithm).

    `logpmf` accepts `x` for :math:`x`.

    Parameters
    ----------
    x : array_like
        The argument of the log-PMF.
    method : {None, 'formula', 'logexp'}
        The strategy used to evaluate the log-PMF. By default (``None``), the
        infrastructure chooses between the following options, listed in order
        of precedence.

        - ``'formula'``: use a formula for the log-PMF itself
        - ``'logexp'``: evaluate the PMF and take its logarithm

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The log-PMF evaluated at the argument `x`.

    See Also
    --------
    pmf
    logcdf

    Notes
    -----
    Suppose a discrete probability distribution has support over the integers
    :math:`\{l, l+1, ..., r-1, r\}`.
    By definition of the support, the log-PMF evaluates to its minimum value
    of :math:`-\infty` (i.e. :math:`\log(0)`) for non-integral :math:`x` and
    for :math:`x` outside the support; i.e. for :math:`x < l` or :math:`x > r`.

    For distributions with infinite support, it is common for `pmf` to return
    a value of ``0`` when the argument is theoretically within the support;
    this can occur because the true value of the PMF is too small to be
    represented by the chosen dtype. The log-PMF, however, will often be finite
    (not ``-inf``) over a much larger domain. Consequently, it may be preferred
    to work with the logarithms of probabilities and probability densities to
    avoid underflow.

    References
    ----------
    .. [1] Probability mass function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Probability_mass_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Binomial(n=10, p=0.5)

    Evaluate the log-PMF at the desired argument:

    >>> X.logpmf(5)
    np.float64(-1.4020427180880297)
    >>> np.allclose(X.logpmf(5), np.log(X.pmf(5)))
    True

    """
    # Default (non-abstract) stub: discrete subclasses must override this.
    raise NotImplementedError()
@abstractmethod
def cdf(self, x, y, /, *, method):
    r"""Cumulative distribution function

    The cumulative distribution function ("CDF"), denoted :math:`F(x)`, is
    the probability the random variable :math:`X` will assume a value
    less than or equal to :math:`x`:

    .. math::

        F(x) = P(X ≤ x)

    A two-argument variant of this function is also defined as the
    probability the random variable :math:`X` will assume a value between
    :math:`x` and :math:`y`.

    .. math::

        F(x, y) = P(x ≤ X ≤ y)

    `cdf` accepts `x` for :math:`x` and `y` for :math:`y`.

    Parameters
    ----------
    x, y : array_like
        The arguments of the CDF. `x` is required; `y` is optional.
    method : {None, 'formula', 'logexp', 'complement', 'quadrature', 'subtraction'}
        The strategy used to evaluate the CDF.
        By default (``None``), the one-argument form of the function
        chooses between the following options, listed in order of precedence.

        - ``'formula'``: use a formula for the CDF itself
        - ``'logexp'``: evaluate the log-CDF and exponentiate
        - ``'complement'``: evaluate the CCDF and take the complement
        - ``'quadrature'``: numerically integrate the PDF (or, in the discrete
          case, sum the PMF)

        In place of ``'complement'``, the two-argument form accepts:

        - ``'subtraction'``: compute the CDF at each argument and take
          the difference.

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The CDF evaluated at the provided argument(s).

    See Also
    --------
    logcdf
    ccdf

    Notes
    -----
    Suppose a continuous probability distribution has support :math:`[l, r]`.
    The CDF :math:`F(x)` is related to the probability density function
    :math:`f(x)` by:

    .. math::

        F(x) = \int_l^x f(u) du

    The two argument version is:

    .. math::

        F(x, y) = \int_x^y f(u) du = F(y) - F(x)

    The CDF evaluates to its minimum value of :math:`0` for :math:`x ≤ l`
    and its maximum value of :math:`1` for :math:`x ≥ r`.

    Suppose a discrete probability distribution has support :math:`[l, r]`.
    The CDF :math:`F(x)` is related to the probability mass function
    :math:`f(x)` by:

    .. math::

        F(x) = \sum_{u=l}^{\lfloor x \rfloor} f(u)

    The CDF evaluates to its minimum value of :math:`0` for :math:`x < l`
    and its maximum value of :math:`1` for :math:`x ≥ r`.

    The CDF is also known simply as the "distribution function".

    References
    ----------
    .. [1] Cumulative distribution function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Cumulative_distribution_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> from scipy import stats
    >>> X = stats.Uniform(a=-0.5, b=0.5)

    Evaluate the CDF at the desired argument:

    >>> X.cdf(0.25)
    0.75

    Evaluate the cumulative probability between two arguments:

    >>> X.cdf(-0.25, 0.25) == X.cdf(0.25) - X.cdf(-0.25)
    True

    """  # noqa: E501
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def icdf(self, p, /, *, method):
    r"""Inverse of the cumulative distribution function.

    For monotonic continuous distributions, the inverse of the cumulative
    distribution function ("inverse CDF"), denoted :math:`F^{-1}(p)`, is the
    argument :math:`x` for which the cumulative distribution function
    :math:`F(x)` evaluates to :math:`p`.

    .. math::

        F^{-1}(p) = x \quad \text{s.t.} \quad F(x) = p

    When a strict "inverse" of the cumulative distribution function does not
    exist (e.g. discrete random variables), the "inverse CDF" is defined by
    convention as the smallest value within the support :math:`\chi` for which
    :math:`F(x)` is at least :math:`p`.

    .. math::

        F^{-1}(p) = \min_\chi \quad \text{s.t.} \quad F(x) ≥ p

    `icdf` accepts `p` for :math:`p \in [0, 1]`.

    Parameters
    ----------
    p : array_like
        The argument of the inverse CDF.
    method : {None, 'formula', 'complement', 'inversion'}
        The strategy used to evaluate the inverse CDF.
        By default (``None``), the infrastructure chooses between the
        following options, listed in order of precedence.

        - ``'formula'``: use a formula for the inverse CDF itself
        - ``'complement'``: evaluate the inverse CCDF at the
          complement of `p`
        - ``'inversion'``: solve numerically for the argument at which the
          CDF is equal to `p`

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The inverse CDF evaluated at the provided argument.

    See Also
    --------
    cdf
    ilogcdf

    Notes
    -----
    Suppose a probability distribution has support :math:`[l, r]`. The
    inverse CDF returns its minimum value of :math:`l` at :math:`p = 0`
    and its maximum value of :math:`r` at :math:`p = 1`. Because the CDF
    has range :math:`[0, 1]`, the inverse CDF is only defined on the
    domain :math:`[0, 1]`; for :math:`p < 0` and :math:`p > 1`, `icdf`
    returns ``nan``.

    The inverse CDF is also known as the quantile function, percentile function,
    and percent-point function.

    References
    ----------
    .. [1] Quantile function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Quantile_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Uniform(a=-0.5, b=0.5)

    Evaluate the inverse CDF at the desired argument:

    >>> X.icdf(0.25)
    -0.25
    >>> np.allclose(X.cdf(X.icdf(0.25)), 0.25)
    True

    This function returns NaN when the argument is outside the domain.

    >>> X.icdf([-0.1, 0, 1, 1.1])
    array([ nan, -0.5,  0.5,  nan])

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def ccdf(self, x, y, /, *, method):
    r"""Complementary cumulative distribution function

    The complementary cumulative distribution function ("CCDF"), denoted
    :math:`G(x)`, is the complement of the cumulative distribution function
    :math:`F(x)`; i.e., probability the random variable :math:`X` will
    assume a value greater than :math:`x`:

    .. math::

        G(x) = 1 - F(x) = P(X > x)

    A two-argument variant of this function is:

    .. math::

        G(x, y) = 1 - F(x, y) = P(X < x \text{ or } X > y)

    `ccdf` accepts `x` for :math:`x` and `y` for :math:`y`.

    Parameters
    ----------
    x, y : array_like
        The arguments of the CCDF. `x` is required; `y` is optional.
    method : {None, 'formula', 'logexp', 'complement', 'quadrature', 'addition'}
        The strategy used to evaluate the CCDF.
        By default (``None``), the infrastructure chooses between the
        following options, listed in order of precedence.

        - ``'formula'``: use a formula for the CCDF itself
        - ``'logexp'``: evaluate the log-CCDF and exponentiate
        - ``'complement'``: evaluate the CDF and take the complement
        - ``'quadrature'``: numerically integrate the PDF (or, in the discrete
          case, sum the PMF)

        The two-argument form chooses between:

        - ``'formula'``: use a formula for the CCDF itself
        - ``'addition'``: compute the CDF at `x` and the CCDF at `y`, then add

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The CCDF evaluated at the provided argument(s).

    See Also
    --------
    cdf
    logccdf

    Notes
    -----
    Suppose a continuous probability distribution has support :math:`[l, r]`.
    The CCDF :math:`G(x)` is related to the probability density function
    :math:`f(x)` by:

    .. math::

        G(x) = \int_x^r f(u) du

    The two argument version is:

    .. math::

        G(x, y) = \int_l^x f(u) du + \int_y^r f(u) du

    The CCDF returns its minimum value of :math:`0` for :math:`x ≥ r`
    and its maximum value of :math:`1` for :math:`x ≤ l`.

    Suppose a discrete probability distribution has support :math:`[l, r]`.
    The CCDF :math:`G(x)` is related to the probability mass function
    :math:`f(x)` by:

    .. math::

        G(x) = \sum_{u=\lfloor x + 1 \rfloor}^{r} f(u)

    The CCDF evaluates to its minimum value of :math:`0` for :math:`x ≥ r`
    and its maximum value of :math:`1` for :math:`x < l`.

    The CCDF is also known as the "survival function".

    References
    ----------
    .. [1] Cumulative distribution function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Cumulative_distribution_function#Derived_functions

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Uniform(a=-0.5, b=0.5)

    Evaluate the CCDF at the desired argument:

    >>> X.ccdf(0.25)
    0.25
    >>> np.allclose(X.ccdf(0.25), 1-X.cdf(0.25))
    True

    Evaluate the complement of the cumulative probability between two arguments:

    >>> X.ccdf(-0.25, 0.25) == X.cdf(-0.25) + X.ccdf(0.25)
    True

    """  # noqa: E501
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def iccdf(self, p, /, *, method):
    r"""Inverse complementary cumulative distribution function.

    The inverse complementary cumulative distribution function ("inverse CCDF"),
    denoted :math:`G^{-1}(p)`, is the argument :math:`x` for which the
    complementary cumulative distribution function :math:`G(x)` evaluates to
    :math:`p`.

    .. math::

        G^{-1}(p) = x \quad \text{s.t.} \quad G(x) = p

    When a strict "inverse" of the complementary cumulative distribution function
    does not exist (e.g. discrete random variables), the "inverse CCDF" is defined
    by convention as the smallest value within the support :math:`\chi` for which
    :math:`G(x)` is no greater than :math:`p`.

    .. math::

        G^{-1}(p) = \min_\chi \quad \text{s.t.} \quad G(x) ≤ p

    `iccdf` accepts `p` for :math:`p \in [0, 1]`.

    Parameters
    ----------
    p : array_like
        The argument of the inverse CCDF.
    method : {None, 'formula', 'complement', 'inversion'}
        The strategy used to evaluate the inverse CCDF.
        By default (``None``), the infrastructure chooses between the
        following options, listed in order of precedence.

        - ``'formula'``: use a formula for the inverse CCDF itself
        - ``'complement'``: evaluate the inverse CDF at the
          complement of `p`
        - ``'inversion'``: solve numerically for the argument at which the
          CCDF is equal to `p`

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The inverse CCDF evaluated at the provided argument.

    Notes
    -----
    Suppose a probability distribution has support :math:`[l, r]`. The
    inverse CCDF returns its minimum value of :math:`l` at :math:`p = 1`
    and its maximum value of :math:`r` at :math:`p = 0`. Because the CCDF
    has range :math:`[0, 1]`, the inverse CCDF is only defined on the
    domain :math:`[0, 1]`; for :math:`p < 0` and :math:`p > 1`, ``iccdf``
    returns ``nan``.

    See Also
    --------
    icdf
    ilogccdf

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Uniform(a=-0.5, b=0.5)

    Evaluate the inverse CCDF at the desired argument:

    >>> X.iccdf(0.25)
    0.25
    >>> np.allclose(X.iccdf(0.25), X.icdf(1-0.25))
    True

    This function returns NaN when the argument is outside the domain.

    >>> X.iccdf([-0.1, 0, 1, 1.1])
    array([ nan,  0.5, -0.5,  nan])

    """
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def logcdf(self, x, y, /, *, method):
    r"""Log of the cumulative distribution function

    The cumulative distribution function ("CDF"), denoted :math:`F(x)`, is
    the probability the random variable :math:`X` will assume a value
    less than or equal to :math:`x`:

    .. math::

        F(x) = P(X ≤ x)

    A two-argument variant of this function is also defined as the
    probability the random variable :math:`X` will assume a value between
    :math:`x` and :math:`y`.

    .. math::

        F(x, y) = P(x ≤ X ≤ y)

    `logcdf` computes the logarithm of the cumulative distribution function
    ("log-CDF"), :math:`\log(F(x))`/:math:`\log(F(x, y))`, but it may be
    numerically favorable compared to the naive implementation (computing
    the CDF and taking the logarithm).

    `logcdf` accepts `x` for :math:`x` and `y` for :math:`y`.

    Parameters
    ----------
    x, y : array_like
        The arguments of the log-CDF. `x` is required; `y` is optional.
    method : {None, 'formula', 'logexp', 'complement', 'quadrature', 'subtraction'}
        The strategy used to evaluate the log-CDF.
        By default (``None``), the one-argument form of the function
        chooses between the following options, listed in order of precedence.

        - ``'formula'``: use a formula for the log-CDF itself
        - ``'logexp'``: evaluate the CDF and take the logarithm
        - ``'complement'``: evaluate the log-CCDF and take the
          logarithmic complement (see Notes)
        - ``'quadrature'``: numerically log-integrate the log-PDF (or, in the
          discrete case, log-sum the log-PMF)

        In place of ``'complement'``, the two-argument form accepts:

        - ``'subtraction'``: compute the log-CDF at each argument and take
          the logarithmic difference (see Notes)

        Not all `method` options are available for all distributions.
        If the selected `method` is not available, a ``NotImplementedError``
        will be raised.

    Returns
    -------
    out : array
        The log-CDF evaluated at the provided argument(s).

    See Also
    --------
    cdf
    logccdf

    Notes
    -----
    Suppose a continuous probability distribution has support :math:`[l, r]`.
    The log-CDF evaluates to its minimum value of :math:`\log(0) = -\infty`
    for :math:`x ≤ l` and its maximum value of :math:`\log(1) = 0` for
    :math:`x ≥ r`. An analogous statement can be made for discrete distributions,
    but the inequality governing the minimum value is strict.

    For distributions with infinite support, it is common for
    `cdf` to return a value of ``0`` when the argument
    is theoretically within the support; this can occur because the true value
    of the CDF is too small to be represented by the chosen dtype. `logcdf`,
    however, will often return a finite (not ``-inf``) result over a much larger
    domain. Similarly, `logcdf` may provided a strictly negative result with
    arguments for which `cdf` would return ``1.0``. Consequently, it may be
    preferred to work with the logarithms of probabilities to avoid underflow
    and related limitations of floating point numbers.

    The "logarithmic complement" of a number :math:`z` is mathematically
    equivalent to :math:`\log(1-\exp(z))`, but it is computed to avoid loss
    of precision when :math:`\exp(z)` is nearly :math:`0` or :math:`1`.
    Similarly, the term "logarithmic difference" of :math:`w` and :math:`z`
    is used here to mean :math:`\log(\exp(w)-\exp(z))`.

    If ``y < x``, the CDF is negative, and therefore the log-CDF
    is complex with imaginary part :math:`\pi`. For
    consistency, the result of this function always has complex dtype
    when `y` is provided, regardless of the value of the imaginary part.

    References
    ----------
    .. [1] Cumulative distribution function, *Wikipedia*,
           https://en.wikipedia.org/wiki/Cumulative_distribution_function

    Examples
    --------
    Instantiate a distribution with the desired parameters:

    >>> import numpy as np
    >>> from scipy import stats
    >>> X = stats.Uniform(a=-0.5, b=0.5)

    Evaluate the log-CDF at the desired argument:

    >>> X.logcdf(0.25)
    -0.287682072451781
    >>> np.allclose(X.logcdf(0.), np.log(X.cdf(0.)))
    True

    """  # noqa: E501
    # Abstract hook: concrete distribution classes must override this.
    raise NotImplementedError()
@abstractmethod
def ilogcdf(self, logp, /, *, method):
r"""Inverse of the logarithm of the cumulative distribution function.
The inverse of the logarithm of the cumulative distribution function
("inverse log-CDF") is the argument :math:`x` for which the logarithm
of the cumulative distribution function :math:`\log(F(x))` evaluates
to :math:`\log(p)`.
Mathematically, it is equivalent to :math:`F^{-1}(\exp(y))`, where
:math:`y = \log(p)`, but it may be numerically favorable compared to
the naive implementation (computing :math:`p = \exp(y)`, then
:math:`F^{-1}(p)`).
`ilogcdf` accepts `logp` for :math:`\log(p) ≤ 0`.
Parameters
----------
logp : array_like
The argument of the inverse log-CDF.
method : {None, 'formula', 'complement', 'inversion'}
The strategy used to evaluate the inverse log-CDF.
By default (``None``), the infrastructure chooses between the
following options, listed in order of precedence.
- ``'formula'``: use a formula for the inverse log-CDF itself
- ``'complement'``: evaluate the inverse log-CCDF at the
logarithmic complement of `logp` (see Notes)
- ``'inversion'``: solve numerically for the argument at which the
log-CDF is equal to `logp`
Not all `method` options are available for all distributions.
If the selected `method` is not available, a ``NotImplementedError``
will be raised.
Returns
-------
out : array
The inverse log-CDF evaluated at the provided argument.
See Also
--------
icdf
logcdf
Notes
-----
Suppose a probability distribution has support :math:`[l, r]`.
The inverse log-CDF returns its minimum value of :math:`l` at
:math:`\log(p) = \log(0) = -\infty` and its maximum value of :math:`r` at
:math:`\log(p) = \log(1) = 0`. Because the log-CDF has range
:math:`[-\infty, 0]`, the inverse log-CDF is only defined on the
negative reals; for :math:`\log(p) > 0`, `ilogcdf` returns ``nan``.
Occasionally, it is needed to find the argument of the CDF for which
the resulting probability is very close to ``0`` or ``1`` - too close to
represent accurately with floating point arithmetic. In many cases,
however, the *logarithm* of this resulting probability may be
represented in floating point arithmetic, in which case this function
may be used to find the argument of the CDF for which the *logarithm*
of the resulting probability is :math:`y = \log(p)`.
The "logarithmic complement" of a number :math:`z` is mathematically
equivalent to :math:`\log(1-\exp(z))`, but it is computed to avoid loss
of precision when :math:`\exp(z)` is nearly :math:`0` or :math:`1`.
Examples
--------
Instantiate a distribution with the desired parameters:
>>> import numpy as np
>>> from scipy import stats
>>> X = stats.Uniform(a=-0.5, b=0.5)
Evaluate the inverse log-CDF at the desired argument:
>>> X.ilogcdf(-0.25)
0.2788007830714034
>>> np.allclose(X.ilogcdf(-0.25), X.icdf(np.exp(-0.25)))
True
"""
raise NotImplementedError()
@abstractmethod
def logccdf(self, x, y, /, *, method):
r"""Log of the complementary cumulative distribution function
The complementary cumulative distribution function ("CCDF"), denoted
:math:`G(x)` is the complement of the cumulative distribution function
:math:`F(x)`; i.e., probability the random variable :math:`X` will
assume a value greater than :math:`x`:
.. math::
G(x) = 1 - F(x) = P(X > x)
A two-argument variant of this function is:
.. math::
G(x, y) = 1 - F(x, y) = P(X < x \quad \text{or} \quad X > y)
`logccdf` computes the logarithm of the complementary cumulative
distribution function ("log-CCDF"), :math:`\log(G(x))`/:math:`\log(G(x, y))`,
but it may be numerically favorable compared to the naive implementation
(computing the CDF and taking the logarithm).
`logccdf` accepts `x` for :math:`x` and `y` for :math:`y`.
Parameters
----------
x, y : array_like
The arguments of the log-CCDF. `x` is required; `y` is optional.
method : {None, 'formula', 'logexp', 'complement', 'quadrature', 'addition'}
The strategy used to evaluate the log-CCDF.
By default (``None``), the one-argument form of the function
chooses between the following options, listed in order of precedence.
- ``'formula'``: use a formula for the log CCDF itself
- ``'logexp'``: evaluate the CCDF and take the logarithm
- ``'complement'``: evaluate the log-CDF and take the
logarithmic complement (see Notes)
- ``'quadrature'``: numerically log-integrate the log-PDF (or, in the
discrete case, log-sum the log-PMF)
The two-argument form chooses between:
- ``'formula'``: use a formula for the log CCDF itself
- ``'addition'``: compute the log-CDF at `x` and the log-CCDF at `y`,
then take the logarithmic sum (see Notes)
Not all `method` options are available for all distributions.
If the selected `method` is not available, a ``NotImplementedError``
will be raised.
Returns
-------
out : array
The log-CCDF evaluated at the provided argument(s).
See Also
--------
ccdf
logcdf
Notes
-----
Suppose a continuous probability distribution has support :math:`[l, r]`.
The log-CCDF returns its minimum value of :math:`\log(0)=-\infty` for
:math:`x ≥ r` and its maximum value of :math:`\log(1) = 0` for
:math:`x ≤ l`. An analogous statement can be made for discrete distributions,
but the inequality governing the maximum value is strict.
For distributions with infinite support, it is common for
`ccdf` to return a value of ``0`` when the argument
is theoretically within the support; this can occur because the true value
of the CCDF is too small to be represented by the chosen dtype. The log
of the CCDF, however, will often be finite (not ``-inf``) over a much larger
domain. Similarly, `logccdf` may provided a strictly negative result with
arguments for which `ccdf` would return ``1.0``. Consequently, it may be
preferred to work with the logarithms of probabilities to avoid underflow
and related limitations of floating point numbers.
The "logarithmic complement" of a number :math:`z` is mathematically
equivalent to :math:`\log(1-\exp(z))`, but it is computed to avoid loss
of precision when :math:`\exp(z)` is nearly :math:`0` or :math:`1`.
Similarly, the term "logarithmic sum" of :math:`w` and :math:`z`
is used here to mean the :math:`\log(\exp(w)+\exp(z))`, AKA
:math:`\text{LogSumExp}(w, z)`.
References
----------
.. [1] Cumulative distribution function, *Wikipedia*,
https://en.wikipedia.org/wiki/Cumulative_distribution_function#Derived_functions
Examples
--------
Instantiate a distribution with the desired parameters:
>>> import numpy as np
>>> from scipy import stats
>>> X = stats.Uniform(a=-0.5, b=0.5)
Evaluate the log-CCDF at the desired argument:
>>> X.logccdf(0.25)
-1.3862943611198906
>>> np.allclose(X.logccdf(0.), np.log(X.ccdf(0.)))
True
""" # noqa: E501
raise NotImplementedError()
@abstractmethod
def ilogccdf(self, logp, /, *, method):
r"""Inverse of the log of the complementary cumulative distribution function.
The inverse of the logarithm of the complementary cumulative distribution
function ("inverse log-CCDF") is the argument :math:`x` for which the logarithm
of the complementary cumulative distribution function :math:`\log(G(x))`
evaluates to :math:`\log(p)`.
Mathematically, it is equivalent to :math:`G^{-1}(\exp(y))`, where
:math:`y = \log(p)`, but it may be numerically favorable compared to the naive
implementation (computing :math:`p = \exp(y)`, then :math:`G^{-1}(p)`).
`ilogccdf` accepts `logp` for :math:`\log(p) ≤ 0`.
Parameters
----------
x : array_like
The argument of the inverse log-CCDF.
method : {None, 'formula', 'complement', 'inversion'}
The strategy used to evaluate the inverse log-CCDF.
By default (``None``), the infrastructure chooses between the
following options, listed in order of precedence.
- ``'formula'``: use a formula for the inverse log-CCDF itself
- ``'complement'``: evaluate the inverse log-CDF at the
logarithmic complement of `x` (see Notes)
- ``'inversion'``: solve numerically for the argument at which the
log-CCDF is equal to `x`
Not all `method` options are available for all distributions.
If the selected `method` is not available, a ``NotImplementedError``
will be raised.
Returns
-------
out : array
The inverse log-CCDF evaluated at the provided argument.
Notes
-----
Suppose a probability distribution has support :math:`[l, r]`. The
inverse log-CCDF returns its minimum value of :math:`l` at
:math:`\log(p) = \log(1) = 0` and its maximum value of :math:`r` at
:math:`\log(p) = \log(0) = -\infty`. Because the log-CCDF has range
:math:`[-\infty, 0]`, the inverse log-CDF is only defined on the
negative reals; for :math:`\log(p) > 0`, `ilogccdf` returns ``nan``.
Occasionally, it is needed to find the argument of the CCDF for which
the resulting probability is very close to ``0`` or ``1`` - too close to
represent accurately with floating point arithmetic. In many cases,
however, the *logarithm* of this resulting probability may be
represented in floating point arithmetic, in which case this function
may be used to find the argument of the CCDF for which the *logarithm*
of the resulting probability is :math:`y = \log(p)`.
The "logarithmic complement" of a number :math:`z` is mathematically
equivalent to :math:`\log(1-\exp(z))`, but it is computed to avoid loss
of precision when :math:`\exp(z)` is nearly :math:`0` or :math:`1`.
See Also
--------
iccdf
ilogccdf
Examples
--------
Instantiate a distribution with the desired parameters:
>>> import numpy as np
>>> from scipy import stats
>>> X = stats.Uniform(a=-0.5, b=0.5)
Evaluate the inverse log-CCDF at the desired argument:
>>> X.ilogccdf(-0.25)
-0.2788007830714034
>>> np.allclose(X.ilogccdf(-0.25), X.iccdf(np.exp(-0.25)))
True
"""
raise NotImplementedError()
@abstractmethod
def logentropy(self, *, method):
r"""Logarithm of the differential entropy
In terms of probability density function :math:`f(x)` and support
:math:`\chi`, the differential entropy (or simply "entropy") of a
continuous random variable :math:`X` is:
.. math::
h(X) = - \int_{\chi} f(x) \log f(x) dx
The definition for a discrete random variable is analogous, with the PMF
replacing the PDF and a sum over the support replacing the integral.
`logentropy` computes the logarithm of the differential entropy
("log-entropy"), :math:`\log(h(X))`, but it may be numerically favorable
compared to the naive implementation (computing :math:`h(X)` then
taking the logarithm).
Parameters
----------
method : {None, 'formula', 'logexp', 'quadrature}
The strategy used to evaluate the log-entropy. By default
(``None``), the infrastructure chooses between the following options,
listed in order of precedence.
- ``'formula'``: use a formula for the log-entropy itself
- ``'logexp'``: evaluate the entropy and take the logarithm
- ``'quadrature'``: numerically log-integrate (or, in the discrete
case, log-sum) the logarithm of the entropy integrand (summand)
Not all `method` options are available for all distributions.
If the selected `method` is not available, a ``NotImplementedError``
will be raised.
Returns
-------
out : array
The log-entropy.
See Also
--------
entropy
logpdf
Notes
-----
The differential entropy of a continuous distribution can be negative.
In this case, the log-entropy is complex with imaginary part :math:`\pi`.
For consistency, the result of this function always has complex dtype,
regardless of the value of the imaginary part.
References
----------
.. [1] Differential entropy, *Wikipedia*,
https://en.wikipedia.org/wiki/Differential_entropy
Examples
--------
Instantiate a distribution with the desired parameters:
>>> import numpy as np
>>> from scipy import stats
>>> X = stats.Uniform(a=-1., b=1.)
Evaluate the log-entropy:
>>> X.logentropy()
(-0.3665129205816642+0j)
>>> np.allclose(np.exp(X.logentropy()), X.entropy())
True
For a random variable with negative entropy, the log-entropy has an
imaginary part equal to `np.pi`.
>>> X = stats.Uniform(a=-.1, b=.1)
>>> X.entropy(), X.logentropy()
(-1.6094379124341007, (0.4758849953271105+3.141592653589793j))
"""
raise NotImplementedError()
@abstractmethod
def entropy(self, *, method):
r"""Differential entropy
In terms of probability density function :math:`f(x)` and support
:math:`\chi`, the differential entropy (or simply "entropy") of a
continuous random variable :math:`X` is:
.. math::
h(X) = - \int_{\chi} f(x) \log f(x) dx
The definition for a discrete random variable is analogous, with the
PMF replacing the PDF and a sum over the support replacing the integral.
Parameters
----------
method : {None, 'formula', 'logexp', 'quadrature'}
The strategy used to evaluate the entropy. By default (``None``),
the infrastructure chooses between the following options, listed
in order of precedence.
- ``'formula'``: use a formula for the entropy itself
- ``'logexp'``: evaluate the log-entropy and exponentiate
- ``'quadrature'``: numerically integrate (or, in the discrete
case, sum) the entropy integrand (summand)
Not all `method` options are available for all distributions.
If the selected `method` is not available, a ``NotImplementedError``
will be raised.
Returns
-------
out : array
The entropy of the random variable.
See Also
--------
logentropy
pdf
Notes
-----
This function calculates the entropy using the natural logarithm; i.e.
the logarithm with base :math:`e`. Consequently, the value is expressed
in (dimensionless) "units" of nats. To convert the entropy to different
units (i.e. corresponding with a different base), divide the result by
the natural logarithm of the desired base.
References
----------
.. [1] Differential entropy, *Wikipedia*,
https://en.wikipedia.org/wiki/Differential_entropy
Examples
--------
Instantiate a distribution with the desired parameters:
>>> from scipy import stats
>>> X = stats.Uniform(a=-1., b=1.)
Evaluate the entropy:
>>> X.entropy()
0.6931471805599454
"""
raise NotImplementedError()
|
_ProbabilityDistribution
|
python
|
ansible__ansible
|
test/integration/targets/collections/collections/ansible_collections/testns/content_adj/plugins/inventory/statichost.py
|
{
"start": 646,
"end": 2156
}
|
class ____(BaseInventoryPlugin, Cacheable):
NAME = 'testns.content_adj.statichost'
def __init__(self):
super(InventoryModule, self).__init__()
self._hosts = set()
def verify_file(self, path):
""" Verify if file is usable by this plugin, base does minimal accessibility check """
if not path.endswith('.statichost.yml') and not path.endswith('.statichost.yaml'):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
# Initialize and validate options
self._read_config_data(path)
self.load_cache_plugin()
# Exercise cache
cache_key = self.get_cache_key(path)
attempt_to_read_cache = self.get_option('cache') and cache
cache_needs_update = self.get_option('cache') and not cache
if attempt_to_read_cache:
try:
host_to_add = self._cache[cache_key]
except KeyError:
cache_needs_update = True
if not attempt_to_read_cache or cache_needs_update:
host_to_add = self.get_option('hostname')
# this is where the magic happens
self.inventory.add_host(host_to_add, 'all')
self._cache[cache_key] = host_to_add
# self.inventory.add_group()...
# self.inventory.add_child()...
# self.inventory.set_variable()..
|
InventoryModule
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/mysql/test_dialect.py
|
{
"start": 16025,
"end": 18654
}
|
class ____(fixtures.TablesTest):
"""This test exists because we removed the MySQL dialect's
override of the UTC_TIMESTAMP() function, where the commit message
for this feature stated that "it caused problems with executemany()".
Since no example was provided, we are trying lots of combinations
here.
[ticket:3966]
"""
__only_on__ = "mysql", "mariadb"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("data", DateTime),
)
Table(
"t_default",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
Column("idata", DateTime, default=func.utc_timestamp()),
Column("udata", DateTime, onupdate=func.utc_timestamp()),
)
def test_insert_executemany(self, connection):
conn = connection
conn.execute(
self.tables.t.insert().values(data=func.utc_timestamp()),
[{"x": 5}, {"x": 6}, {"x": 7}],
)
def test_update_executemany(self, connection):
conn = connection
timestamp = datetime.datetime(2015, 4, 17, 18, 5, 2)
conn.execute(
self.tables.t.insert(),
[
{"x": 5, "data": timestamp},
{"x": 6, "data": timestamp},
{"x": 7, "data": timestamp},
],
)
conn.execute(
self.tables.t.update()
.values(data=func.utc_timestamp())
.where(self.tables.t.c.x == bindparam("xval")),
[{"xval": 5}, {"xval": 6}, {"xval": 7}],
)
def test_insert_executemany_w_default(self, connection):
conn = connection
conn.execute(
self.tables.t_default.insert(), [{"x": 5}, {"x": 6}, {"x": 7}]
)
def test_update_executemany_w_default(self, connection):
conn = connection
timestamp = datetime.datetime(2015, 4, 17, 18, 5, 2)
conn.execute(
self.tables.t_default.insert(),
[
{"x": 5, "idata": timestamp},
{"x": 6, "idata": timestamp},
{"x": 7, "idata": timestamp},
],
)
conn.execute(
self.tables.t_default.update()
.values(idata=func.utc_timestamp())
.where(self.tables.t_default.c.x == bindparam("xval")),
[{"xval": 5}, {"xval": 6}, {"xval": 7}],
)
|
RemoveUTCTimestampTest
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/base.py
|
{
"start": 2220,
"end": 4084
}
|
class ____:
"""
Base class for Variable.mutation_type. It encodes information about
1. The type of mutation Dynamo allows on the variable.
2. Whether the value represented by this variable already existed before
Dynamo tracing.
"""
def __init__(self, typ: SourceType) -> None:
# In HigherOrderOperator tracing, we need to distinguish
# between MutationTypes inside the HigherOrderOperator and
# ones outside it. For example, it is not safe to mutate
# `a` in the following example because it was constructed
# in a different scope.
#
# def f(x):
# a = 1
# def g(x):
# nonlocal a
# a = 2
# return x
# return wrap(g, x) + a
#
# We use self.scope to distinguish this.
# scope == 0: The object was an existing variable
# scope == 1: The object was created while Dynamo
# was introspecting a function
# (and no HigherOrderOps were involved)
# scope >= 2: The object was created through
# Dynamo introspection of a HigherOrderOp.
# The exact number corresponds to the level
# of nested HigherOrderOps.
if typ is SourceType.Existing:
self.scope = 0
elif typ is SourceType.New:
self.scope = current_scope_id()
else:
unimplemented(
gb_type="Unsupported SourceType",
context=f"MutationType.__init__ {self} {typ}",
explanation=f"Dynamo does not support the type `{typ}`",
hints=[
"This branch is not supposed to be reachable.",
*graph_break_hints.DYNAMO_BUG,
],
)
|
MutationType
|
python
|
apache__airflow
|
airflow-core/tests/unit/serialization/test_dag_dependency.py
|
{
"start": 901,
"end": 1615
}
|
class ____:
@pytest.mark.parametrize("dep_type", ("asset", "asset-alias", "asset-name-ref", "asset-uri-ref"))
def test_node_id_with_asset(self, dep_type):
dag_dep = DagDependency(
source=dep_type,
target="target",
label="label",
dependency_type=dep_type,
dependency_id="id",
)
assert dag_dep.node_id == f"{dep_type}:id"
def test_node_id(self):
dag_dep = DagDependency(
source="source",
target="target",
label="label",
dependency_type="trigger",
dependency_id="id",
)
assert dag_dep.node_id == "trigger:source:target:id"
|
TestDagDependency
|
python
|
pytorch__pytorch
|
torch/_inductor/exc.py
|
{
"start": 3314,
"end": 3366
}
|
class ____(CppCompileError):
pass
|
CUDACompileError
|
python
|
doocs__leetcode
|
solution/1100-1199/1130.Minimum Cost Tree From Leaf Values/Solution.py
|
{
"start": 0,
"end": 519
}
|
class ____:
def mctFromLeafValues(self, arr: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> Tuple:
if i == j:
return 0, arr[i]
s, mx = inf, -1
for k in range(i, j):
s1, mx1 = dfs(i, k)
s2, mx2 = dfs(k + 1, j)
t = s1 + s2 + mx1 * mx2
if s > t:
s = t
mx = max(mx1, mx2)
return s, mx
return dfs(0, len(arr) - 1)[0]
|
Solution
|
python
|
mlflow__mlflow
|
docs/api_reference/source/languagesections/__init__.py
|
{
"start": 226,
"end": 590
}
|
class ____(Directive):
has_content = True
def run(self):
self.assert_has_content()
text = "\n".join(self.content)
node = nodes.container(text)
node["classes"].append("code-section")
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
|
CodeSectionDirective
|
python
|
walkccc__LeetCode
|
solutions/2587. Rearrange Array to Maximize Prefix Score/2587.py
|
{
"start": 0,
"end": 161
}
|
class ____:
def maxScore(self, nums: list[int]) -> int:
return sum(num > 0
for num in itertools.accumulate(sorted(nums, reverse=True)))
|
Solution
|
python
|
google__jax
|
tests/tree_util_test.py
|
{
"start": 6262,
"end": 7999
}
|
class ____:
x: tuple[int, int]
y: int
z: int = dataclasses.field(metadata={"static": True})
TREES += (
(ADataclass(x=(1, 2), y=3),),
(ADataclassWithMeta(x=(1, 2), y=3, z=4),),
)
TREE_STRINGS += (
"PyTreeDef(CustomNode(ADataclass[()], [(*, *), *]))",
"PyTreeDef(CustomNode(ADataclassWithMeta[(4,)], [(*, *), *]))",
)
TREES += (
(collections.OrderedDict([("foo", 34), ("baz", 101), ("something", -42)]),),
(
collections.defaultdict(
dict, [("foo", 34), ("baz", 101), ("something", -42)]
),
),
(ANamedTupleSubclass(foo="hello", bar=3.5),),
(FlatCache(None),),
(FlatCache(1),),
(FlatCache({"a": [1, 2]}),),
(BlackBox(value=2),),
)
# pytest expects "tree_util_test.ATuple"
STRS = []
for tree_str in TREE_STRINGS:
tree_str = re.escape(tree_str)
tree_str = tree_str.replace("__main__", ".*")
STRS.append(tree_str)
TREE_STRINGS = STRS
LEAVES = (
("foo",),
(0.1,),
(1,),
(object(),),
)
# All except those decorated by register_pytree_node_class
TREES_WITH_KEYPATH = (
(None,),
((None,),),
((),),
(([()]),),
((1, 0),),
(((1, "foo"), ["bar", (3, None, 7)]),),
([3],),
([3, ATuple(foo=(3, ATuple(foo=3, bar=None)), bar={"baz": 34})],),
([AnObject2(3, None, [4, "foo"])],),
(SpecialWithKeys(2, 3.),),
({"a": 1, "b": 0},),
(collections.OrderedDict([("foo", 34), ("baz", 101), ("something", -42)]),),
(collections.defaultdict(dict,
[("foo", 34), ("baz", 101), ("something", -42)]),),
(ANamedTupleSubclass(foo="hello", bar=3.5),),
(StaticInt(1),),
(StaticTuple((2, 3)),),
(StaticDict(foo=4, bar=5),),
(BlackBox(value=2),),
)
|
ADataclassWithMeta
|
python
|
django__django
|
tests/expressions_window/models.py
|
{
"start": 31,
"end": 112
}
|
class ____(models.Model):
code = models.CharField(max_length=10)
|
Classification
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py
|
{
"start": 13517,
"end": 14024
}
|
class ____:
projects: list["DgWorkspaceProjectSpec"]
scaffold_project_options: "DgWorkspaceScaffoldProjectOptions"
@classmethod
def from_raw(cls, raw: "DgRawWorkspaceConfig") -> Self:
projects = [DgWorkspaceProjectSpec.from_raw(spec) for spec in raw.get("projects", [])]
scaffold_project_options = DgWorkspaceScaffoldProjectOptions.from_raw(
raw.get("scaffold_project_options", {})
)
return cls(projects, scaffold_project_options)
|
DgWorkspaceConfig
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 108378,
"end": 109465
}
|
class ____(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example:
.. doctest::
>>> CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
ParseResults(['CMD', 'CMD', 'CMD'], {})
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, match_string: str = "", **kwargs) -> None:
matchString: str = deprecate_argument(kwargs, "matchString", "")
match_string = matchString or match_string
super().__init__(match_string.upper())
# Preserve the defining literal.
self.returnString = match_string
self.errmsg = f"Expected {self.name}"
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
|
CaselessLiteral
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.