language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 6390,
"end": 6892
} | class ____(logging.StreamHandler['SafeEncodingWriter']):
"""StreamHandler which switches line terminator by record.nonl flag."""
def emit(self, record: logging.LogRecord) -> None:
try:
self.acquire()
if getattr(record, 'nonl', False):
# skip appending terminator when nonl=True
self.terminator = ''
super().emit(record)
finally:
self.terminator = '\n'
self.release()
| NewLineStreamHandler |
python | keras-team__keras | keras/src/layers/attention/additive_attention_test.py | {
"start": 81,
"end": 3030
} | class ____(testing.TestCase):
def test_attention_basics(self):
# No scale
self.run_layer_test(
layers.AdditiveAttention,
init_kwargs={
"use_scale": True,
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
# With scale.
self.run_layer_test(
layers.AdditiveAttention,
init_kwargs={
"use_scale": False,
"dropout": 0.5,
},
input_shape=[(2, 3, 4), (2, 4, 4)],
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
run_training_check=False,
)
def test_attention_correctness(self):
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
key = np.array([[[0.0, 1.0], [1.0, 0.0]]])
value = np.array([[[1.0, 2.0], [3.0, 4.0]]])
layer = layers.AdditiveAttention(use_scale=False)
output, scores = layer(
[query, value, key],
return_attention_scores=True,
)
self.assertAllClose(
output, [[[1.727, 2.727], [2.272, 3.272]]], atol=1e-3
)
self.assertAllClose(
scores, [[[0.636, 0.363], [0.363, 0.636]]], atol=1e-3
)
def test_attention_with_mask(self):
layer = layers.AdditiveAttention(use_scale=False)
query = np.array([[[1.0, 0.0], [0.0, 1.0]]])
value = np.array([[[1.0, 1.0], [1.0, 1.0]]])
query_mask = np.array([[True, False]])
value_mask = np.array([[True, False]])
output, scores = layer(
[query, value],
mask=[query_mask, value_mask],
return_attention_scores=True,
)
self.assertAllClose(output, [[[1.0, 1.0], [0.0, 0.0]]])
self.assertAllClose(scores, [[[1.0, 0.0], [1.0, 0.0]]])
def test_attention_errors(self):
layer = layers.AdditiveAttention()
tensor = np.array([[[1.0, 1.0], [1.0, 1.0]]])
with self.assertRaisesRegex(ValueError, "must be called on a list"):
layer(tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor, tensor, tensor])
with self.assertRaisesRegex(ValueError, "layer mask must be a list"):
layer([tensor, tensor], mask=tensor)
with self.assertRaisesRegex(ValueError, "length 2 or 3"):
layer([tensor, tensor], mask=[tensor])
| AdditiveAttentionTest |
python | django__django | tests/forms_tests/field_tests/test_datefield.py | {
"start": 320,
"end": 8889
} | class ____(SimpleTestCase):
def test_form_field(self):
a = GetDate({"mydate_month": "4", "mydate_day": "1", "mydate_year": "2008"})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data["mydate"], date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict(), we must
# accept the input from the "as_hidden" rendering as well.
self.assertHTMLEqual(
a["mydate"].as_hidden(),
'<input type="hidden" name="mydate" value="2008-04-01" id="id_mydate">',
)
b = GetDate({"mydate": "2008-4-1"})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data["mydate"], date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({"mydate_month": "2", "mydate_day": "31", "mydate_year": "2010"})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {"mydate": ["Enter a valid date."]})
# label tag is correctly associated with month dropdown
d = GetDate({"mydate_month": "1", "mydate_day": "1", "mydate_year": "2010"})
self.assertIn('<label for="id_mydate_month">', d.as_p())
# Inputs raising an OverflowError.
e = GetDate(
{
"mydate_month": str(sys.maxsize + 1),
"mydate_day": "31",
"mydate_year": "2010",
}
)
self.assertIs(e.is_valid(), False)
self.assertEqual(e.errors, {"mydate": ["Enter a valid date."]})
@translation.override("nl")
def test_l10n_date_changed(self):
"""
DateField.has_changed() with SelectDateWidget works with a localized
date format (#17165).
"""
# With Field.show_hidden_initial=False
b = GetDate(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "1",
},
initial={"mydate": date(2008, 4, 1)},
)
self.assertFalse(b.has_changed())
b = GetDate(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "2",
},
initial={"mydate": date(2008, 4, 1)},
)
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "1",
"initial-mydate": HiddenInput().format_value(date(2008, 4, 1)),
},
initial={"mydate": date(2008, 4, 1)},
)
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "22",
"initial-mydate": HiddenInput().format_value(date(2008, 4, 1)),
},
initial={"mydate": date(2008, 4, 1)},
)
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "22",
"initial-mydate": HiddenInput().format_value(date(2008, 4, 1)),
},
initial={"mydate": date(2008, 4, 22)},
)
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial(
{
"mydate_year": "2008",
"mydate_month": "4",
"mydate_day": "22",
"initial-mydate": HiddenInput().format_value(date(2008, 4, 22)),
},
initial={"mydate": date(2008, 4, 1)},
)
self.assertFalse(b.has_changed())
@translation.override("nl")
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({"mydate_month": "2", "mydate_day": "31", "mydate_year": "2010"})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {"mydate": ["Voer een geldige datum in."]})
@translation.override("nl")
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({"mydate_month": "1", "mydate_day": "1", "mydate_year": "2010"})
self.assertIn('<label for="id_mydate_day">', a.as_p())
def test_datefield_1(self):
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(
date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59))
)
self.assertEqual(
date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30, 59, 200))
)
self.assertEqual(date(2006, 10, 25), f.clean("2006-10-25"))
self.assertEqual(date(2006, 10, 25), f.clean("10/25/2006"))
self.assertEqual(date(2006, 10, 25), f.clean("10/25/06"))
self.assertEqual(date(2006, 10, 25), f.clean("Oct 25 2006"))
self.assertEqual(date(2006, 10, 25), f.clean("October 25 2006"))
self.assertEqual(date(2006, 10, 25), f.clean("October 25, 2006"))
self.assertEqual(date(2006, 10, 25), f.clean("25 October 2006"))
self.assertEqual(date(2006, 10, 25), f.clean("25 October, 2006"))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("2006-4-31")
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("200a-10-25")
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("25/10/06")
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("0-0-0")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual("None", repr(f.clean(None)))
self.assertIsNone(f.clean(""))
self.assertEqual("None", repr(f.clean("")))
def test_datefield_3(self):
f = DateField(input_formats=["%Y %m %d"])
self.assertEqual(date(2006, 10, 25), f.clean(date(2006, 10, 25)))
self.assertEqual(date(2006, 10, 25), f.clean(datetime(2006, 10, 25, 14, 30)))
self.assertEqual(date(2006, 10, 25), f.clean("2006 10 25"))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("2006-10-25")
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("10/25/2006")
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("10/25/06")
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(date(2006, 10, 25), f.clean(" 10/25/2006 "))
self.assertEqual(date(2006, 10, 25), f.clean(" 10/25/06 "))
self.assertEqual(date(2006, 10, 25), f.clean(" Oct 25 2006 "))
self.assertEqual(date(2006, 10, 25), f.clean(" October 25 2006 "))
self.assertEqual(date(2006, 10, 25), f.clean(" October 25, 2006 "))
self.assertEqual(date(2006, 10, 25), f.clean(" 25 October 2006 "))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean(" ")
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
with self.assertRaisesMessage(ValidationError, "'Enter a valid date.'"):
f.clean("a\x00b")
def test_datefield_changed(self):
format = "%d/%m/%Y"
f = DateField(input_formats=[format])
d = date(2007, 9, 17)
self.assertFalse(f.has_changed(d, "17/09/2007"))
def test_datefield_strptime(self):
"""field.strptime() doesn't raise a UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime("31 мая 2011", "%d-%b-%y")
except Exception as e:
# assertIsInstance or assertRaises cannot be used because
# UnicodeEncodeError is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
| DateFieldTest |
python | kamyu104__LeetCode-Solutions | Python/unit-conversion-ii.py | {
"start": 64,
"end": 967
} | class ____(object):
def queryConversions(self, conversions, queries):
"""
:type conversions: List[List[int]]
:type queries: List[List[int]]
:rtype: List[int]
"""
MOD = 10**9+7
def divmod(a, b):
return (a*pow(b, MOD-2, MOD))%MOD
def unit():
adj = [[] for _ in xrange(len(conversions)+1)]
for u, v, w in conversions:
adj[u].append((v, w))
result = [0]*len(adj)
result[0] = 1
q = [0]
while q:
new_q = []
for u in q:
for v, w in adj[u]:
result[v] = (result[u]*w)%MOD
new_q.append(v)
q = new_q
return result
lookup = unit()
return [divmod(lookup[b], lookup[a]) for a, b in queries]
| Solution |
python | getsentry__sentry | tests/sentry/taskworker/test_client.py | {
"start": 1002,
"end": 2046
} | class ____:
"""Stub for grpc service methods"""
def __init__(
self,
path: str,
responses: list[Any],
request_serializer: Callable,
response_deserializer: Callable,
):
self.path = path
self.request_serializer = request_serializer
self.response_deserializer = response_deserializer
self.responses = responses
def __call__(self, *args, **kwargs):
"""Capture calls and use registered mocks"""
# move the head to the tail
res = self.responses[0]
tail = self.responses[1:]
self.responses = tail + [res]
if isinstance(res.response, Exception):
raise res.response
return res.response
def with_call(self, *args, **kwargs):
res = self.responses[0]
if res.metadata:
assert res.metadata == kwargs.get("metadata"), "Metadata mismatch"
if isinstance(res.response, Exception):
raise res.response
return (res.response, None)
| MockServiceMethod |
python | kamyu104__LeetCode-Solutions | Python/longest-repeating-character-replacement.py | {
"start": 50,
"end": 536
} | class ____(object):
def characterReplacement(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
result, max_count = 0, 0
count = collections.Counter()
for i in xrange(len(s)):
count[s[i]] += 1
max_count = max(max_count, count[s[i]])
if result - max_count >= k:
count[s[i-result]] -= 1
else:
result += 1
return result
| Solution |
python | wandb__wandb | tests/system_tests/backend_fixtures.py | {
"start": 2692,
"end": 2745
} | class ____:
name: str
@dataclass(frozen=True)
| _Team |
python | huggingface__transformers | src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py | {
"start": 798,
"end": 3044
} | class ____(pl.LightningModule):
def __init__(self, model):
super().__init__()
self.model = model
self.num_labels = 2
self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
# implement only because lightning requires to do so
def forward(self):
pass
def convert_longformer_qa_checkpoint_to_pytorch(
longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
# load longformer model from model identifier
longformer = LongformerModel.from_pretrained(longformer_model)
lightning_model = LightningModel(longformer)
ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"), weights_only=True)
lightning_model.load_state_dict(ckpt["state_dict"])
# init longformer question answering model
longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| LightningModel |
python | huggingface__transformers | src/transformers/models/dpr/tokenization_dpr.py | {
"start": 6742,
"end": 15041
} | class ____:
def __call__(
self,
questions,
titles: Optional[str] = None,
texts: Optional[str] = None,
padding: Union[bool, str] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_attention_mask: Optional[bool] = None,
**kwargs,
) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
questions,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
**kwargs,
)
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(
questions,
text_pair,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
**kwargs,
)
titles = titles if not isinstance(titles, str) else [titles]
texts = texts if not isinstance(texts, str) else [texts]
n_passages = len(titles)
questions = questions if not isinstance(questions, str) else [questions] * n_passages
if len(titles) != len(texts):
raise ValueError(
f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
)
encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
encoded_inputs = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
]
}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
encoded_inputs["attention_mask"] = attention_mask
return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def decode_best_spans(
self,
reader_input: BatchEncoding,
reader_output: DPRReaderOutput,
num_spans: int = 16,
max_answer_length: int = 64,
num_spans_per_passage: int = 4,
) -> list[DPRSpanPrediction]:
"""
Get the span predictions for the extractive Q&A model.
Returns: *List* of *DPRReaderOutput* sorted by descending *(relevance_score, span_score)*. Each
*DPRReaderOutput* is a *Tuple* with:
- **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
spans in the same passage. It corresponds to the sum of the start and end logits of the span.
- **relevance_score**: `float` that corresponds to the score of the each passage to answer the question,
compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
- **doc_id**: `int` the id of the passage. - **start_index**: `int` the start index of the span
(inclusive). - **end_index**: `int` the end index of the span (inclusive).
Examples:
```python
>>> from transformers import DPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors="pt",
... )
>>> outputs = model(**encoded_inputs)
>>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
>>> print(predicted_spans[0].text) # best span
a song
```"""
input_ids = reader_input["input_ids"]
start_logits, end_logits, relevance_logits = reader_output[:3]
n_passages = len(relevance_logits)
sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
nbest_spans_predictions: list[DPRReaderOutput] = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id)
else:
sequence_len = len(sequence_ids)
best_spans = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len],
end_logits=end_logits[doc_id][passage_offset:sequence_len],
max_answer_length=max_answer_length,
top_spans=num_spans_per_passage,
)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
relevance_score=relevance_logits[doc_id],
doc_id=doc_id,
start_index=start_index,
end_index=end_index,
text=self.decode(sequence_ids[start_index : end_index + 1]),
)
)
if len(nbest_spans_predictions) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans(
self,
start_logits: list[int],
end_logits: list[int],
max_answer_length: int,
top_spans: int,
) -> list[DPRSpanPrediction]:
"""
Finds the best answer span for the extractive Q&A model for one passage. It returns the best span by descending
`span_score` order and keeping max `top_spans` spans. Spans longer that `max_answer_length` are ignored.
"""
scores = []
for start_index, start_score in enumerate(start_logits):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
scores = sorted(scores, key=lambda x: x[1], reverse=True)
chosen_span_intervals = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
length = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals
):
continue
chosen_span_intervals.append((start_index, end_index))
if len(chosen_span_intervals) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
| CustomDPRReaderTokenizerMixin |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 87156,
"end": 95079
} | class ____(StrategyBase):
__doc__ = StrategyBase.__doc__
def experimental_distribute_values_from_function(self, value_fn):
"""Generates `tf.distribute.DistributedValues` from `value_fn`.
This function is to generate `tf.distribute.DistributedValues` to pass
into `run`, `reduce`, or other methods that take
distributed values when not using datasets.
Args:
value_fn: The function to run to generate values. It is called for
each replica with `tf.distribute.ValueContext` as the sole argument. It
must return a Tensor or a type that can be converted to a Tensor.
Returns:
A `tf.distribute.DistributedValues` containing a value for each replica.
Example usage:
1. Return constant value per replica:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> def value_fn(ctx):
... return tf.constant(1.)
>>> distributed_values = (
... strategy.experimental_distribute_values_from_function(
... value_fn))
>>> local_result = strategy.experimental_local_results(
... distributed_values)
>>> local_result
(<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>)
2. Distribute values in array based on replica_id: {: value=2}
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> array_value = np.array([3., 2., 1.])
>>> def value_fn(ctx):
... return array_value[ctx.replica_id_in_sync_group]
>>> distributed_values = (
... strategy.experimental_distribute_values_from_function(
... value_fn))
>>> local_result = strategy.experimental_local_results(
... distributed_values)
>>> local_result
(3.0, 2.0)
3. Specify values using num_replicas_in_sync: {: value=3}
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> def value_fn(ctx):
... return ctx.num_replicas_in_sync
>>> distributed_values = (
... strategy.experimental_distribute_values_from_function(
... value_fn))
>>> local_result = strategy.experimental_local_results(
... distributed_values)
>>> local_result
(2, 2)
4. Place values on devices and distribute: {: value=4}
```
strategy = tf.distribute.TPUStrategy()
worker_devices = strategy.extended.worker_devices
multiple_values = []
for i in range(strategy.num_replicas_in_sync):
with tf.device(worker_devices[i]):
multiple_values.append(tf.constant(1.0))
def value_fn(ctx):
return multiple_values[ctx.replica_id_in_sync_group]
distributed_values = strategy.
experimental_distribute_values_from_function(
value_fn)
```
"""
return self._extended._experimental_distribute_values_from_function( # pylint: disable=protected-access
value_fn)
def gather(self, value, axis):
# pylint: disable=line-too-long, protected-access
"""Gather `value` across replicas along `axis` to the current device.
Given a `tf.distribute.DistributedValues` or `tf.Tensor`-like
object `value`, this API gathers and concatenates `value` across replicas
along the `axis`-th dimension. The result is copied to the "current" device,
which would typically be the CPU of the worker on which the program is
running. For `tf.distribute.TPUStrategy`, it is the first TPU host. For
multi-client `tf.distribute.MultiWorkerMirroredStrategy`, this is the CPU of
each worker.
This API can only be called in the cross-replica context. For a counterpart
in the replica context, see `tf.distribute.ReplicaContext.all_gather`.
Note: For all strategies except `tf.distribute.TPUStrategy`, the input
`value` on different replicas must have the same rank, and their shapes must
be the same in all dimensions except the `axis`-th dimension. In other
words, their shapes cannot be different in a dimension `d` where `d` does
not equal to the `axis` argument. For example, given a
`tf.distribute.DistributedValues` with component tensors of shape
`(1, 2, 3)` and `(1, 3, 3)` on two replicas, you can call
`gather(..., axis=1, ...)` on it, but not `gather(..., axis=0, ...)` or
`gather(..., axis=2, ...)`. However, for `tf.distribute.TPUStrategy.gather`,
all tensors must have exactly the same rank and same shape.
Note: Given a `tf.distribute.DistributedValues` `value`, its component
tensors must have a non-zero rank. Otherwise, consider using
`tf.expand_dims` before gathering them.
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> # A DistributedValues with component tensor of shape (2, 1) on each replica
... distributed_values = strategy.experimental_distribute_values_from_function(lambda _: tf.identity(tf.constant([[1], [2]])))
>>> @tf.function
... def run():
... return strategy.gather(distributed_values, axis=0)
>>> run()
<tf.Tensor: shape=(4, 1), dtype=int32, numpy=
array([[1],
[2],
[1],
[2]], dtype=int32)>
Consider the following example for more combinations:
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1", "GPU:2", "GPU:3"])
>>> single_tensor = tf.reshape(tf.range(6), shape=(1,2,3))
>>> distributed_values = strategy.experimental_distribute_values_from_function(lambda _: tf.identity(single_tensor))
>>> @tf.function
... def run(axis):
... return strategy.gather(distributed_values, axis=axis)
>>> axis=0
>>> run(axis)
<tf.Tensor: shape=(4, 2, 3), dtype=int32, numpy=
array([[[0, 1, 2],
[3, 4, 5]],
[[0, 1, 2],
[3, 4, 5]],
[[0, 1, 2],
[3, 4, 5]],
[[0, 1, 2],
[3, 4, 5]]], dtype=int32)>
>>> axis=1
>>> run(axis)
<tf.Tensor: shape=(1, 8, 3), dtype=int32, numpy=
array([[[0, 1, 2],
[3, 4, 5],
[0, 1, 2],
[3, 4, 5],
[0, 1, 2],
[3, 4, 5],
[0, 1, 2],
[3, 4, 5]]], dtype=int32)>
>>> axis=2
>>> run(axis)
<tf.Tensor: shape=(1, 2, 12), dtype=int32, numpy=
array([[[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5, 3, 4, 5]]], dtype=int32)>
Args:
value: a `tf.distribute.DistributedValues` instance, e.g. returned by
`Strategy.run`, to be combined into a single tensor. It can also be a
regular tensor when used with `tf.distribute.OneDeviceStrategy` or the
default strategy. The tensors that constitute the DistributedValues
can only be dense tensors with non-zero rank, NOT a `tf.IndexedSlices`.
axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the
range [0, rank(value)).
Returns:
A `Tensor` that's the concatenation of `value` across replicas along
`axis` dimension.
"""
# pylint: enable=line-too-long
error_message = ("tf.distribute.Strategy.gather method requires "
"cross-replica context, use "
"get_replica_context().all_gather() instead")
_require_cross_replica_or_default_context_extended(self._extended,
error_message)
dst = device_util.current(
) or self._extended._default_device or "/device:CPU:0"
if isinstance(value, indexed_slices.IndexedSlices):
raise NotImplementedError("gather does not support IndexedSlices")
return self._extended._local_results(
self._extended._gather_to(value, dst, axis))[0]
# TF v1.x version has additional deprecated APIs
@tf_export(v1=["distribute.Strategy"])
| Strategy |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/auth_manager/cli/test_avp_commands.py | {
"start": 1326,
"end": 5577
} | class ____:
def setup_method(self):
mock_boto3.reset_mock()
@classmethod
def setup_class(cls):
with conf_vars(
{
(
"core",
"auth_manager",
): "airflow.providers.amazon.aws.auth_manager.aws_auth_manager.AwsAuthManager"
}
):
importlib.reload(cli_parser)
cls.arg_parser = cli_parser.get_parser()
@pytest.mark.parametrize(
"dry_run",
[False, True],
)
@patch("airflow.providers.amazon.aws.auth_manager.cli.avp_commands._get_client")
def test_init_avp_with_no_existing_resources(self, mock_get_client, dry_run):
mock_get_client.return_value = mock_boto3
policy_store_description = "test-policy-store"
policy_store_id = "test-policy-store-id"
paginator = Mock()
paginator.paginate.return_value = []
mock_boto3.get_paginator.return_value = paginator
mock_boto3.create_policy_store.return_value = {"policyStoreId": policy_store_id}
with conf_vars({("database", "check_migrations"): "False"}):
params = [
"aws-auth-manager",
"init-avp",
"--policy-store-description",
policy_store_description,
]
if dry_run:
params.append("--dry-run")
init_avp(self.arg_parser.parse_args(params))
if dry_run:
mock_boto3.create_policy_store.assert_not_called()
mock_boto3.put_schema.assert_not_called()
else:
mock_boto3.create_policy_store.assert_called_once_with(
validationSettings={
"mode": "STRICT",
},
description=policy_store_description,
)
mock_boto3.put_schema.assert_called_once_with(
policyStoreId=policy_store_id,
definition={
"cedarJson": ANY,
},
)
@pytest.mark.parametrize(
"dry_run",
[False, True],
)
@patch("airflow.providers.amazon.aws.auth_manager.cli.avp_commands._get_client")
def test_init_avp_with_existing_resources(self, mock_get_client, dry_run):
mock_get_client.return_value = mock_boto3
policy_store_description = "test-policy-store"
policy_store_id = "test-policy-store-id"
paginator = Mock()
paginator.paginate.return_value = [
{"policyStores": [{"description": policy_store_description, "policyStoreId": policy_store_id}]}
]
mock_boto3.get_paginator.return_value = paginator
with conf_vars({("database", "check_migrations"): "False"}):
params = [
"aws-auth-manager",
"init-avp",
"--policy-store-description",
policy_store_description,
]
if dry_run:
params.append("--dry-run")
init_avp(self.arg_parser.parse_args(params))
mock_boto3.create_policy_store.assert_not_called()
mock_boto3.update_policy_store.assert_not_called()
mock_boto3.put_schema.assert_not_called()
@pytest.mark.parametrize(
"dry_run",
[False, True],
)
@patch("airflow.providers.amazon.aws.auth_manager.cli.avp_commands._get_client")
def test_update_schema(self, mock_get_client, dry_run):
mock_get_client.return_value = mock_boto3
policy_store_id = "test-policy-store-id"
with conf_vars({("database", "check_migrations"): "False"}):
params = [
"aws-auth-manager",
"update-avp-schema",
"--policy-store-id",
policy_store_id,
]
if dry_run:
params.append("--dry-run")
update_schema(self.arg_parser.parse_args(params))
if dry_run:
mock_boto3.put_schema.assert_not_called()
else:
mock_boto3.put_schema.assert_called_once_with(
policyStoreId=policy_store_id,
definition={
"cedarJson": ANY,
},
)
| TestAvpCommands |
python | FactoryBoy__factory_boy | tests/test_docs_internals.py | {
"start": 1836,
"end": 2071
} | class ____:
ACTIONS = ['create', 'update', 'disable']
def __init__(self, user, action, timestamp):
self.user = user
self.action = action
self.timestamp = timestamp
user.logs.append(self)
| UserLog |
python | plotly__plotly.py | plotly/graph_objs/histogram/marker/_colorbar.py | {
"start": 233,
"end": 61680
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram.marker"
_path_str = "histogram.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"x",
"xanchor",
"xpad",
"xref",
"y",
"yanchor",
"ypad",
"yref",
}
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.histogram.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.histogram.mark
er.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
histogram.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.histogram.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def x(self):
"""
Sets the x position with respect to `xref` of the color bar (in
plot fraction). When `xref` is "paper", defaults to 1.02 when
`orientation` is "v" and 0.5 when `orientation` is "h". When
`xref` is "container", defaults to 1 when `orientation` is "v"
and 0.5 when `orientation` is "h". Must be between 0 and 1 if
`xref` is "container" and between "-2" and 3 if `xref` is
"paper".
The 'x' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
@property
def xref(self):
"""
Sets the container `x` refers to. "container" spans the entire
`width` of the plot. "paper" refers to the width of the
plotting area only.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y(self):
"""
Sets the y position with respect to `yref` of the color bar (in
plot fraction). When `yref` is "paper", defaults to 0.5 when
`orientation` is "v" and 1.02 when `orientation` is "h". When
`yref` is "container", defaults to 0.5 when `orientation` is
"v" and 1 when `orientation` is "h". Must be between 0 and 1 if
`yref` is "container" and between "-2" and 3 if `yref` is
"paper".
The 'y' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
@property
def yref(self):
"""
Sets the container `y` refers to. "container" spans the entire
`height` of the plot. "paper" refers to the height of the
plotting area only.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['container', 'paper']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
x=None,
xanchor=None,
xpad=None,
xref=None,
y=None,
yanchor=None,
ypad=None,
yref=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogram.marke
r.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.histog
ram.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
histogram.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram.marker.colorbar.
Title` instance or dict with compatible properties
x
Sets the x position with respect to `xref` of the color
bar (in plot fraction). When `xref` is "paper",
defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h". When `xref` is "container",
defaults to 1 when `orientation` is "v" and 0.5 when
`orientation` is "h". Must be between 0 and 1 if `xref`
is "container" and between "-2" and 3 if `xref` is
"paper".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
xref
Sets the container `x` refers to. "container" spans the
entire `width` of the plot. "paper" refers to the width
of the plotting area only.
y
Sets the y position with respect to `yref` of the color
bar (in plot fraction). When `yref` is "paper",
defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h". When `yref` is "container",
defaults to 0.5 when `orientation` is "v" and 1 when
`orientation` is "h". Must be between 0 and 1 if `yref`
is "container" and between "-2" and 3 if `yref` is
"paper".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
yref
Sets the container `y` refers to. "container" spans the
entire `height` of the plot. "paper" refers to the
height of the plotting area only.
Returns
-------
ColorBar
"""
super().__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.marker.ColorBar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("borderwidth", arg, borderwidth)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("len", arg, len)
self._set_property("lenmode", arg, lenmode)
self._set_property("minexponent", arg, minexponent)
self._set_property("nticks", arg, nticks)
self._set_property("orientation", arg, orientation)
self._set_property("outlinecolor", arg, outlinecolor)
self._set_property("outlinewidth", arg, outlinewidth)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showexponent", arg, showexponent)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("thickness", arg, thickness)
self._set_property("thicknessmode", arg, thicknessmode)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklabeloverflow", arg, ticklabeloverflow)
self._set_property("ticklabelposition", arg, ticklabelposition)
self._set_property("ticklabelstep", arg, ticklabelstep)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("x", arg, x)
self._set_property("xanchor", arg, xanchor)
self._set_property("xpad", arg, xpad)
self._set_property("xref", arg, xref)
self._set_property("y", arg, y)
self._set_property("yanchor", arg, yanchor)
self._set_property("ypad", arg, ypad)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| ColorBar |
python | huggingface__transformers | src/transformers/models/mbart/tokenization_mbart.py | {
"start": 1333,
"end": 10252
} | class ____(TokenizersBackend):
"""
Construct an MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
<tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import MBartTokenizer
>>> tokenizer = MBartTokenizer.from_pretrained(
... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(
self,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
src_lang=None,
tgt_lang=None,
additional_special_tokens=None,
vocab=None,
merges=None, # Ignored for Unigram
vocab_file=None,
**kwargs,
):
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
_additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens]
)
# MBart uses fairseq vocab alignment: <s>=0, <pad>=1, </s>=2, <unk>=3, then SPM pieces[3:], lang codes, <mask>
if vocab is not None:
# Handle different vocab formats (dict, list of tokens, or list of tuples)
# SentencePieceExtractor returns list[tuple[str, float]] which is the expected format
if isinstance(vocab, dict):
vocab = [(token, 0.0) for token in vocab.keys()]
elif isinstance(vocab, list) and len(vocab) > 0:
if not isinstance(vocab[0], tuple):
vocab = [(token, 0.0) for token in vocab]
else:
# Ensure tuples are (str, float) format
vocab = [(str(item[0]), float(item[1])) for item in vocab]
# Reorder to fairseq: <s>, <pad>, </s>, <unk>, ... (rest of vocab from SPM[3:])
vocab_list = []
vocab_list.append((str(bos_token), 0.0))
vocab_list.append((str(pad_token), 0.0))
vocab_list.append((str(eos_token), 0.0))
vocab_list.append((str(unk_token), 0.0))
# Add the rest of the SentencePiece vocab (skipping first 3: <unk>, <s>, </s>)
vocab_list.extend(vocab[4:])
# Add language codes
for lang_code in FAIRSEQ_LANGUAGE_CODES:
vocab_list.append((str(lang_code), 0.0))
# Add mask token
vocab_list.append((str(mask_token), 0.0))
self._vocab_scores = vocab_list
else:
self._vocab_scores = [
(str(bos_token), 0.0),
(str(pad_token), 0.0),
(str(eos_token), 0.0),
(str(unk_token), 0.0),
("▁", -2.0),
]
for lang_code in FAIRSEQ_LANGUAGE_CODES:
self._vocab_scores.append((lang_code, 0.0))
self._vocab_scores.append((str(mask_token), 0.0))
self._tokenizer = Tokenizer(
Unigram(
self._vocab_scores,
unk_id=3,
byte_fallback=False,
)
)
self._tokenizer.normalizer = None
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.WhitespaceSplit(),
pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="always", split=True),
]
)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="always", split=True)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
src_lang=src_lang,
tgt_lang=tgt_lang,
additional_special_tokens=_additional_special_tokens,
**kwargs,
)
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self.fairseq_offset = 1
# Build fairseq token mappings for backward compatibility
self.fairseq_tokens_to_ids = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
self.fairseq_tokens_to_ids["<mask>"] = self.convert_tokens_to_ids(str(mask_token))
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
self._src_lang = src_lang if src_lang is not None else "en_XX"
self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
if self.tgt_lang is None:
self.tgt_lang = self._src_lang
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
self.cur_lang_code = self.convert_tokens_to_ids(lang)
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
__all__ = ["MBartTokenizer"]
| MBartTokenizer |
python | matplotlib__matplotlib | lib/mpl_toolkits/axisartist/axislines.py | {
"start": 5072,
"end": 5356
} | class ____(_AxisArtistHelperBase):
def __init__(self, nth_coord, value):
self._value = value
super().__init__(nth_coord)
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
| _FloatingAxisArtistHelperBase |
python | qdrant__qdrant-client | qdrant_client/local/payload_value_setter.py | {
"start": 987,
"end": 2871
} | class ____:
TYPE: Any
SETTERS: dict[JsonPathItemType, Type["Setter"]] = {}
@classmethod
def add_setter(cls, item_type: JsonPathItemType, setter: Type["Setter"]) -> None:
cls.SETTERS[item_type] = setter
@classmethod
def set(
cls,
data: Any,
k_list: list[JsonPathItem],
value: dict[str, Any],
prev_data: Any,
prev_key: Optional[JsonPathItem],
) -> None:
if not k_list:
return
current_key = k_list.pop(0)
cls.SETTERS[current_key.item_type]._set(
data,
current_key,
k_list,
value,
prev_data,
prev_key,
)
@classmethod
def _set(
cls,
data: Any,
current_key: JsonPathItem,
k_list: list[JsonPathItem],
value: dict[str, Any],
prev_data: Any,
prev_key: Optional[JsonPathItem],
) -> None:
if isinstance(data, cls.TYPE):
cls._set_compatible_types(
data=data, current_key=current_key, k_list=k_list, value=value
)
else:
cls._set_incompatible_types(
current_key=current_key,
k_list=k_list,
value=value,
prev_data=prev_data,
prev_key=prev_key,
)
@classmethod
def _set_compatible_types(
cls,
data: Any,
current_key: JsonPathItem,
k_list: list[JsonPathItem],
value: dict[str, Any],
) -> None:
raise NotImplementedError()
@classmethod
def _set_incompatible_types(
cls,
current_key: JsonPathItem,
k_list: list[JsonPathItem],
value: dict[str, Any],
prev_data: Any,
prev_key: Optional[JsonPathItem],
) -> None:
raise NotImplementedError()
| Setter |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 65435,
"end": 65787
} | class ____(Locator):
"""
Place no ticks.
"""
def __call__(self):
return self.tick_values(None, None)
def tick_values(self, vmin, vmax):
"""
Return the locations of the ticks.
.. note::
Because there are no ticks, *vmin* and *vmax* are not used.
"""
return []
| NullLocator |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 50041,
"end": 50267
} | class ____(_TestSubsets, __TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
| TestSubsetEqualEmpty |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/packages.py | {
"start": 2969,
"end": 3069
} | class ____(Exception):
"""Exception raised when package is suspended."""
| PackageSuspendedException |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 136707,
"end": 137092
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("type", "value")
type = sgqlc.types.Field(
sgqlc.types.non_null(SecurityAdvisoryIdentifierType), graphql_name="type"
)
value = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="value")
| SecurityAdvisoryIdentifierFilter |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 3829,
"end": 3947
} | class ____(vLLMTranscriptionResponse):
model_config = ConfigDict(arbitrary_types_allowed=True)
| TranscriptionResponse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/op_execution_context.py | {
"start": 3388,
"end": 53288
} | class ____(AbstractComputeExecutionContext):
"""The ``context`` object that can be made available as the first argument to the function
used for computing an op or asset.
This context object provides system information such as resources, config, and logging.
To construct an execution context for testing purposes, use :py:func:`dagster.build_op_context`.
Example:
.. code-block:: python
from dagster import op, OpExecutionContext
@op
def hello_world(context: OpExecutionContext):
context.log.info("Hello, world!")
"""
__slots__ = ["_step_execution_context"]
def __init__(self, step_execution_context: StepExecutionContext):
self._step_execution_context = check.inst_param(
step_execution_context,
"step_execution_context",
StepExecutionContext,
)
self._pdb: Optional[ForkedPdb] = None
self._events: list[DagsterEvent] = []
self._output_metadata: dict[str, Any] = {}
@property
def op_execution_context(self) -> "OpExecutionContext":
return self
@public
@property
def op_config(self) -> Any:
"""Any: The parsed config specific to this op."""
return self._step_execution_context.op_config
@property
def dagster_run(self) -> DagsterRun:
"""DagsterRun: The current run."""
return self._step_execution_context.dagster_run
@public
@property
def run(self) -> DagsterRun:
"""DagsterRun: The current run."""
return self.dagster_run
@public
@property
def instance(self) -> DagsterInstance:
"""DagsterInstance: The current Dagster instance."""
return self._step_execution_context.instance
@public
@property
def pdb(self) -> ForkedPdb:
"""dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the op.
Example:
.. code-block:: python
@op
def debug(context):
context.pdb.set_trace()
"""
if self._pdb is None:
self._pdb = ForkedPdb()
return self._pdb
@public
@property
def resources(self) -> Any:
"""Resources: The currently available resources."""
return self._step_execution_context.resources
@property
def step_launcher(self) -> Optional[StepLauncher]:
"""Optional[StepLauncher]: The current step launcher, if any."""
return self._step_execution_context.step_launcher
@public
@property
def run_id(self) -> str:
"""str: The id of the current execution's run."""
return self._step_execution_context.run_id
@public
@property
def run_config(self) -> Mapping[str, object]:
"""dict: The run config for the current execution."""
return self._step_execution_context.run_config
@public
@property
def job_def(self) -> JobDefinition:
"""JobDefinition: The currently executing job."""
return self._step_execution_context.job_def
@property
def repository_def(self) -> RepositoryDefinition:
"""RepositoryDefinition: The Dagster repository for the currently executing job."""
return self._step_execution_context.repository_def
@public
@property
def job_name(self) -> str:
"""str: The name of the currently executing pipeline."""
return self._step_execution_context.job_name
@public
@property
def log(self) -> DagsterLogManager:
"""DagsterLogManager: The log manager available in the execution context."""
return self._step_execution_context.log
@property
def node_handle(self) -> NodeHandle:
"""NodeHandle: The current op's handle.
:meta private:
"""
return self._step_execution_context.node_handle
@property
def op_handle(self) -> NodeHandle:
"""NodeHandle: The current op's handle.
:meta private:
"""
return self.node_handle
@property
def op(self) -> Node:
"""Node: The object representing the invoked op within the graph.
:meta private:
"""
return self._step_execution_context.job_def.get_node(self.node_handle)
@public
@property
def op_def(self) -> OpDefinition:
"""OpDefinition: The current op definition."""
return cast("OpDefinition", self.op.definition)
@public
@property
def has_partitions(self) -> bool:
"""Whether the current run is a partitioned run."""
return self._step_execution_context.has_partitions
@public
@property
def has_partition_key(self) -> bool:
"""Whether the current run targets a single partition."""
return self._step_execution_context.has_partition_key
@public
@property
def partition_key(self) -> str:
"""The partition key for the current run.
Raises an error if the current run is not a partitioned run. Or if the current run is operating
over a range of partitions (ie. a backfill of several partitions executed in a single run).
Examples:
.. code-block:: python
partitions_def = DailyPartitionsDefinition("2023-08-20")
@asset(
partitions_def=partitions_def
)
def my_asset(context: AssetExecutionContext):
context.log.info(context.partition_key)
# materializing the 2023-08-21 partition of this asset will log:
# "2023-08-21"
"""
return self._step_execution_context.partition_key
@public
@property
def partition_keys(self) -> Sequence[str]:
"""Returns a list of the partition keys for the current run.
If you want to write your asset to support running a backfill of several partitions in a single run,
you can use ``partition_keys`` to get all of the partitions being materialized
by the backfill.
Examples:
.. code-block:: python
partitions_def = DailyPartitionsDefinition("2023-08-20")
@asset(partitions_def=partitions_def)
def an_asset(context: AssetExecutionContext):
context.log.info(context.partition_keys)
# running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:
# ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]
"""
key_range = self.partition_key_range
partitions_def = self._step_execution_context.run_partitions_def
if partitions_def is None:
raise DagsterInvariantViolationError(
"Cannot access partition_keys for a non-partitioned run"
)
with partition_loading_context(dynamic_partitions_store=self.instance):
return partitions_def.get_partition_keys_in_range(key_range)
@deprecated(breaking_version="2.0", additional_warn_text="Use `partition_key_range` instead.")
@public
@property
def asset_partition_key_range(self) -> PartitionKeyRange:
"""The range of partition keys for the current run.
If run is for a single partition key, return a `PartitionKeyRange` with the same start and
end. Raises an error if the current run is not a partitioned run.
"""
return self.partition_key_range
@public
@property
def has_partition_key_range(self) -> bool:
"""Whether the current run targets a range of partitions."""
return self._step_execution_context.has_partition_key_range
@public
@property
def partition_key_range(self) -> PartitionKeyRange:
"""The range of partition keys for the current run.
If run is for a single partition key, returns a `PartitionKeyRange` with the same start and
end. Raises an error if the current run is not a partitioned run.
Examples:
.. code-block:: python
partitions_def = DailyPartitionsDefinition("2023-08-20")
@asset(
partitions_def=partitions_def
)
def my_asset(context: AssetExecutionContext):
context.log.info(context.partition_key_range)
# running a backfill of the 2023-08-21 through 2023-08-25 partitions of this asset will log:
# PartitionKeyRange(start="2023-08-21", end="2023-08-25")
"""
return self._step_execution_context.partition_key_range
@public
@property
def partition_time_window(self) -> TimeWindow:
    """The partition time window for the current run.

    Raises an error if the current run is not partitioned, or if the job's
    partitions definition is not a TimeWindowPartitionsDefinition.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def my_asset(context: AssetExecutionContext):
                # materializing the 2023-08-21 partition logs:
                # TimeWindow("2023-08-21", "2023-08-22")
                context.log.info(context.partition_time_window)
    """
    return self._step_execution_context.partition_time_window
@public
def has_tag(self, key: str) -> bool:
    """Return whether a logging tag is set on the current run.

    Args:
        key (str): The tag name to check.

    Returns:
        bool: True if the tag is set.
    """
    run_tags = self.dagster_run.tags
    return key in run_tags
@public
def get_tag(self, key: str) -> Optional[str]:
    """Look up the value of a logging tag on the current run.

    Args:
        key (str): The tag name to look up.

    Returns:
        Optional[str]: The value of the tag, or None if it is not set.
    """
    run_tags = self.dagster_run.tags
    return run_tags.get(key)
@property
def run_tags(self) -> Mapping[str, str]:
    """Mapping[str, str]: All tags set on the current run."""
    return self.dagster_run.tags
def has_events(self) -> bool:
    """Return True if any user-generated events are buffered on this context."""
    return len(self._events) > 0
def consume_events(self) -> Iterator[DagsterEvent]:
    """Pop and yield all user-generated events recorded on this context.

    The first call yields every event logged since the beginning of the op's
    computation; each subsequent call yields only events recorded since the
    previous call. Designed for internal use — users should never need to
    invoke this method.
    """
    pending, self._events = self._events, []
    yield from pending
@public
def log_event(self, event: UserEvent) -> None:
    """Log an AssetMaterialization, AssetObservation, or ExpectationResult from within the body of an op.

    Events logged with this method will appear in the list of DagsterEvents,
    as well as the event log. AssetCheckEvaluation events are also accepted.

    Args:
        event (Union[AssetMaterialization, AssetObservation, ExpectationResult]): The event to log.

    **Examples:**

    .. code-block:: python

        from dagster import op, AssetMaterialization

        @op
        def log_materialization(context):
            context.log_event(AssetMaterialization("foo"))
    """
    step_ctx = self._step_execution_context
    # Translate the user-facing event into the corresponding system DagsterEvent.
    if isinstance(event, AssetMaterialization):
        dagster_event = DagsterEvent.asset_materialization(step_ctx, event)
    elif isinstance(event, AssetCheckEvaluation):
        dagster_event = DagsterEvent.asset_check_evaluation(step_ctx, event)
    elif isinstance(event, AssetObservation):
        dagster_event = DagsterEvent.asset_observation(step_ctx, event)
    elif isinstance(event, ExpectationResult):
        dagster_event = DagsterEvent.step_expectation_result(step_ctx, event)
    else:
        check.failed(f"Unexpected event {event}")
    self._events.append(dagster_event)
@public
def add_output_metadata(
    self,
    metadata: Mapping[str, Any],
    output_name: Optional[str] = None,
    mapping_key: Optional[str] = None,
) -> None:
    """Add metadata to one of the outputs of an op.

    This can be invoked multiple times per output in the body of an op. If the same key is
    passed multiple times, the value associated with the last call will be used.

    Args:
        metadata (Mapping[str, Any]): The metadata to attach to the output
        output_name (Optional[str]): The name of the output to attach metadata to. If there is
            only one output on the op, then this argument does not need to be provided. The
            metadata will automatically be attached to the only output.
        mapping_key (Optional[str]): The mapping key of the output to attach metadata to. If the
            output is not dynamic, this argument does not need to be provided.

    **Examples:**

    .. code-block:: python

        from dagster import Out, op
        from typing import Tuple

        @op
        def add_metadata(context):
            context.add_output_metadata({"foo": "bar"})
            # Since the default output is called "result", metadata will be
            # attached to the output "result".
            return 5

        @op(out={"a": Out(), "b": Out()})
        def add_metadata_two_outputs(context) -> Tuple[str, int]:
            context.add_output_metadata({"foo": "bar"}, output_name="b")
            context.add_output_metadata({"baz": "bat"}, output_name="a")
            return ("dog", 5)
    """
    # Validate eagerly so a bad call site fails with a clear check error.
    metadata = check.mapping_param(metadata, "metadata", key_type=str)
    output_name = check.opt_str_param(output_name, "output_name")
    mapping_key = check.opt_str_param(mapping_key, "mapping_key")

    self._step_execution_context.add_output_metadata(
        metadata=metadata, output_name=output_name, mapping_key=mapping_key
    )
def get_output_metadata(
    self, output_name: str, mapping_key: Optional[str] = None
) -> Optional[Mapping[str, Any]]:
    """Return the metadata accumulated so far for the named output, if any.

    Args:
        output_name (str): The output whose metadata to fetch.
        mapping_key (Optional[str]): For dynamic outputs, the mapping key to fetch metadata for.
    """
    step_ctx = self._step_execution_context
    return step_ctx.get_output_metadata(output_name=output_name, mapping_key=mapping_key)
def get_step_execution_context(self) -> StepExecutionContext:
    """Punch through to the underlying step execution context.

    Intended for advanced users (e.g. framework authors) only.

    :meta private:

    Returns:
        StepExecutionContext: The underlying system context.
    """
    return self._step_execution_context
@public
@property
def retry_number(self) -> int:
    """int: The retry attempt currently executing — 0 for the initial attempt, 1 for the first retry, etc."""
    return self._step_execution_context.previous_attempt_count
def describe_op(self) -> str:
    """Return a human-readable description of the op being executed."""
    step_ctx = self._step_execution_context
    return step_ctx.describe_op()
@public
def get_mapping_key(self) -> Optional[str]:
    """Return the mapping key when executing downstream of a DynamicOutput, otherwise None."""
    current_step = self._step_execution_context.step
    return current_step.get_mapping_key()
#############################################################################################
# asset related methods
#############################################################################################
@public
@property
def asset_key(self) -> AssetKey:
    """The AssetKey for the current asset. In a multi_asset, use asset_key_for_output instead."""
    # Only inspect assets_def after confirming it exists — accessing it when
    # absent raises. len(dict) equals len(dict.keys()).
    if self.has_assets_def and len(self.assets_def.keys_by_output_name) > 1:
        raise DagsterInvariantViolationError(
            "Cannot call `context.asset_key` in a multi_asset with more than one asset. Use"
            " `context.asset_key_for_output` instead."
        )
    output_keys = self.assets_def.keys_by_output_name.values()
    return next(iter(output_keys))
@public
@property
def has_assets_def(self) -> bool:
    """bool: Whether there is a backing AssetsDefinition for what is currently executing."""
    asset_layer = self.job_def.asset_layer
    return asset_layer.get_assets_def_for_node(self.node_handle) is not None
@public
@property
def assets_def(self) -> AssetsDefinition:
    """The backing AssetsDefinition for what is currently executing; raises if there is none."""
    result = self.job_def.asset_layer.get_assets_def_for_node(self.node_handle)
    if result is None:
        raise DagsterInvalidPropertyError(
            f"Op '{self.op.name}' does not have an assets definition."
        )
    return result
@public
@property
def selected_asset_keys(self) -> AbstractSet[AssetKey]:
    """The set of AssetKeys this execution is expected to materialize (empty if no assets def)."""
    return self.assets_def.keys if self.has_assets_def else set()
@property
def is_subset(self):
    """Whether the current AssetsDefinition is subsetted.

    Note that this can be True inside a graph asset for an op that is not
    itself subsetted, if the graph asset is subsetted elsewhere.
    """
    return self.assets_def.is_subset if self.has_assets_def else False
@public
@property
def selected_asset_check_keys(self) -> AbstractSet[AssetCheckKey]:
    """The asset check keys corresponding to the current selection of assets this execution is expected to materialize."""
    if not self.has_assets_def:
        return set()
    return self.assets_def.check_keys
@public
@property
def selected_output_names(self) -> AbstractSet[str]:
    """The output names corresponding to the current selection of assets this execution is expected to materialize."""
    return self._step_execution_context.selected_output_names
@public
def asset_key_for_output(self, output_name: str = "result") -> AssetKey:
    """Return the AssetKey for the corresponding output; fails if the output has no asset."""
    asset_layer = self.job_def.asset_layer
    asset_key = asset_layer.get_asset_key_for_node_output(
        node_handle=self.op_handle, output_name=output_name
    )
    if asset_key is None:
        # check.failed always raises, so falling through is safe.
        check.failed(f"Output '{output_name}' has no asset")
    return asset_key
@public
def output_for_asset_key(self, asset_key: AssetKey) -> str:
    """Return the output name for the corresponding asset key; fails if the asset has no output."""
    handle = self.job_def.asset_layer.get_op_output_handle(asset_key)
    if handle is None:
        check.failed(f"Asset key '{asset_key}' has no output")
    return handle.output_name
@public
def asset_key_for_input(self, input_name: str) -> AssetKey:
    """Return the AssetKey for the corresponding input; fails if the input has no asset."""
    asset_key = self.job_def.asset_layer.get_asset_key_for_node_input(
        inner_node_handle=self.op_handle, input_name=input_name
    )
    if asset_key is None:
        check.failed(f"Input '{input_name}' has no asset")
    return asset_key
@deprecated(breaking_version="2.0", additional_warn_text="Use `partition_key` instead.")
@public
def asset_partition_key_for_output(self, output_name: str = "result") -> str:
    """Return the asset partition key for the given output.

    Args:
        output_name (str): For assets defined with the ``@asset`` decorator, the output name
            is provided automatically. For ``@multi_asset``, pass the op output associated
            with the asset key (as determined by AssetOut) to get the partition key for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext):
                # materializing the 2023-08-21 partition logs: "2023-08-21"
                context.log.info(context.asset_partition_key_for_output())

            @multi_asset(
                outs={
                    "first_asset": AssetOut(key=["my_assets", "first_asset"]),
                    "second_asset": AssetOut(key=["my_assets", "second_asset"]),
                },
                partitions_def=partitions_def,
            )
            def a_multi_asset(context: AssetExecutionContext):
                # each call logs "2023-08-21" when materializing that partition
                context.log.info(context.asset_partition_key_for_output("first_asset"))
                context.log.info(context.asset_partition_key_for_output("second_asset"))
    """
    step_ctx = self._step_execution_context
    return step_ctx.asset_partition_key_for_output(output_name)
@deprecated(breaking_version="2.0", additional_warn_text="Use `partition_time_window` instead.")
@public
def asset_partitions_time_window_for_output(self, output_name: str = "result") -> TimeWindow:
    """The time window for the partitions of the output asset.

    To support a backfill of several partitions in a single run, use this to
    get the TimeWindow covering all of the partitions being materialized by
    that run.

    Raises an error if either of the following are true:

    - The output asset has no partitioning.
    - The output asset is not partitioned with a TimeWindowPartitionsDefinition or a
      MultiPartitionsDefinition with one time-partitioned dimension.

    Args:
        output_name (str): For assets defined with the ``@asset`` decorator, the output name
            is provided automatically. For ``@multi_asset``, pass the op output associated
            with the asset key (as determined by AssetOut) to get the time window for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext):
                # materializing the 2023-08-21 partition logs:
                # TimeWindow("2023-08-21", "2023-08-22")
                # a backfill of 2023-08-21 through 2023-08-25 logs:
                # TimeWindow("2023-08-21", "2023-08-26")
                context.log.info(context.asset_partitions_time_window_for_output())
    """
    step_ctx = self._step_execution_context
    return step_ctx.asset_partitions_time_window_for_output(output_name)
@deprecated(breaking_version="2.0", additional_warn_text="Use `partition_key_range` instead.")
@public
def asset_partition_key_range_for_output(
    self, output_name: str = "result"
) -> PartitionKeyRange:
    """Return the PartitionKeyRange for the corresponding output. Errors if the run is not partitioned.

    To support a backfill of several partitions in a single run, use this to
    get all of the partitions being materialized by that run.

    Args:
        output_name (str): For assets defined with the ``@asset`` decorator, the output name
            is provided automatically. For ``@multi_asset``, pass the op output associated
            with the asset key (as determined by AssetOut) to get the partition key range for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext):
                # a backfill of 2023-08-21 through 2023-08-25 logs:
                # PartitionKeyRange(start="2023-08-21", end="2023-08-25")
                context.log.info(context.asset_partition_key_range_for_output())
    """
    step_ctx = self._step_execution_context
    return step_ctx.asset_partition_key_range_for_output(output_name)
@public
def asset_partition_key_range_for_input(self, input_name: str) -> PartitionKeyRange:
    """Return the PartitionKeyRange for the corresponding input.

    Errors if the asset depends on a non-contiguous chunk of the input.

    To support a backfill of several partitions in a single run, use this to
    get the range of partition keys of the input that are relevant to that
    backfill.

    Args:
        input_name (str): The name of the input to get the time window for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset():
                ...

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext, upstream_asset):
                # a backfill of 2023-08-21 through 2023-08-25 logs:
                # PartitionKeyRange(start="2023-08-21", end="2023-08-25")
                context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))

            @asset(
                ins={
                    "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)),
                },
                partitions_def=partitions_def,
            )
            def another_asset(context: AssetExecutionContext, upstream_asset):
                # the same backfill logs the offset range:
                # PartitionKeyRange(start="2023-08-20", end="2023-08-24")
                context.log.info(context.asset_partition_key_range_for_input("upstream_asset"))
    """
    step_ctx = self._step_execution_context
    return step_ctx.asset_partition_key_range_for_input(input_name)
@public
def asset_partition_key_for_input(self, input_name: str) -> str:
    """Return the partition key of the upstream asset corresponding to the given input.

    Args:
        input_name (str): The name of the input to get the partition key for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset():
                ...

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext, upstream_asset):
                # materializing the 2023-08-21 partition logs: "2023-08-21"
                context.log.info(context.asset_partition_key_for_input("upstream_asset"))

            @asset(
                partitions_def=partitions_def,
                ins={
                    "self_dependent_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)),
                }
            )
            def self_dependent_asset(context: AssetExecutionContext, self_dependent_asset):
                # materializing the 2023-08-21 partition logs the offset key: "2023-08-20"
                context.log.info(context.asset_partition_key_for_input("self_dependent_asset"))
    """
    step_ctx = self._step_execution_context
    return step_ctx.asset_partition_key_for_input(input_name)
@public
def asset_partitions_def_for_output(self, output_name: str = "result") -> PartitionsDefinition:
    """The PartitionsDefinition on the asset corresponding to this output.

    Raises an error if the asset is not partitioned.

    Args:
        output_name (str): For assets defined with the ``@asset`` decorator, the output name
            is provided automatically. For ``@multi_asset``, pass the op output associated
            with the asset key (as determined by AssetOut) to get the PartitionsDefinition for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset(context: AssetExecutionContext):
                # materializing the 2023-08-21 partition logs:
                # DailyPartitionsDefinition("2023-08-20")
                context.log.info(context.asset_partitions_def_for_output())
    """
    asset_key = self.asset_key_for_output(output_name)
    asset_layer = self._step_execution_context.job_def.asset_layer
    partitions_def = asset_layer.get(asset_key).partitions_def
    if partitions_def is None:
        raise DagsterInvariantViolationError(
            f"Attempting to access partitions def for asset {asset_key}, but it is not"
            " partitioned"
        )
    return partitions_def
@public
def asset_partitions_def_for_input(self, input_name: str) -> PartitionsDefinition:
    """The PartitionsDefinition on the upstream asset corresponding to this input.

    Args:
        input_name (str): The name of the input to get the PartitionsDefinition for.

    Raises:
        DagsterInvariantViolationError: If the upstream asset is not partitioned.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset():
                ...

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext, upstream_asset):
                context.log.info(context.asset_partitions_def_for_input("upstream_asset"))

            # materializing the 2023-08-21 partition of this asset will log:
            # DailyPartitionsDefinition("2023-08-20")
    """
    asset_key = self.asset_key_for_input(input_name)
    result = self._step_execution_context.job_def.asset_layer.get(asset_key).partitions_def
    if result is None:
        raise DagsterInvariantViolationError(
            f"Attempting to access partitions def for asset {asset_key}, but it is not"
            " partitioned"
        )
    return result
@deprecated(breaking_version="2.0", additional_warn_text="Use `partition_keys` instead.")
@public
def asset_partition_keys_for_output(self, output_name: str = "result") -> Sequence[str]:
    """Return the list of partition keys for the given output.

    To support a backfill of several partitions in a single run, use this to
    get all of the partitions being materialized by that run.

    Args:
        output_name (str): For assets defined with the ``@asset`` decorator, the output name
            is provided automatically. For ``@multi_asset``, pass the op output associated
            with the asset key (as determined by AssetOut) to get the partition keys for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext):
                # a backfill of 2023-08-21 through 2023-08-25 logs:
                # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]
                context.log.info(context.asset_partition_keys_for_output())
    """
    # Resolve the partitions def and key range inside the loading context so
    # dynamic partitions are visible throughout, as before.
    with partition_loading_context(dynamic_partitions_store=self.instance):
        partitions_def = self.asset_partitions_def_for_output(output_name)
        key_range = self._step_execution_context.asset_partition_key_range_for_output(
            output_name
        )
        return partitions_def.get_partition_keys_in_range(key_range)
@public
def asset_partition_keys_for_input(self, input_name: str) -> Sequence[str]:
    """Return the partition keys of the upstream asset corresponding to the given input.

    To support a backfill of several partitions in a single run, use this to
    get all of the partition keys of the input that are relevant to that
    backfill.

    Args:
        input_name (str): The name of the input to get the time window for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset():
                ...

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext, upstream_asset):
                # a backfill of 2023-08-21 through 2023-08-25 logs:
                # ["2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24", "2023-08-25"]
                context.log.info(context.asset_partition_keys_for_input("upstream_asset"))

            @asset(
                ins={
                    "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)),
                },
                partitions_def=partitions_def,
            )
            def another_asset(context: AssetExecutionContext, upstream_asset):
                # the same backfill logs the offset keys:
                # ["2023-08-20", "2023-08-21", "2023-08-22", "2023-08-23", "2023-08-24"]
                context.log.info(context.asset_partition_keys_for_input("upstream_asset"))
    """
    subset = self._step_execution_context.asset_partitions_subset_for_input(input_name)
    return list(subset.get_partition_keys())
@public
def asset_partitions_time_window_for_input(self, input_name: str = "result") -> TimeWindow:
    """The time window for the partitions of the input asset.

    To support a backfill of several partitions in a single run, use this to
    get the time window of the input that is relevant to that backfill.

    Raises an error if either of the following are true:

    - The input asset has no partitioning.
    - The input asset is not partitioned with a TimeWindowPartitionsDefinition or a
      MultiPartitionsDefinition with one time-partitioned dimension.

    Args:
        input_name (str): The name of the input to get the partition time window for.

    Examples:
        .. code-block:: python

            partitions_def = DailyPartitionsDefinition("2023-08-20")

            @asset(partitions_def=partitions_def)
            def upstream_asset():
                ...

            @asset(partitions_def=partitions_def)
            def an_asset(context: AssetExecutionContext, upstream_asset):
                # materializing the 2023-08-21 partition logs:
                # TimeWindow("2023-08-21", "2023-08-22")
                context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))

            @asset(
                ins={
                    "upstream_asset": AssetIn(partition_mapping=TimeWindowPartitionMapping(start_offset=-1, end_offset=-1)),
                },
                partitions_def=partitions_def,
            )
            def another_asset(context: AssetExecutionContext, upstream_asset):
                # materializing the 2023-08-21 partition logs the offset window:
                # TimeWindow("2023-08-20", "2023-08-21")
                context.log.info(context.asset_partitions_time_window_for_input("upstream_asset"))
    """
    # NOTE(review): the `"result"` default reads like an output-name default
    # carried over from the *_for_output sibling — confirm it is intended for
    # an input name. Kept for backward compatibility.
    step_ctx = self._step_execution_context
    return step_ctx.asset_partitions_time_window_for_input(input_name)
@public
def get_asset_provenance(self, asset_key: AssetKey) -> Optional[DataProvenance]:
    """Return the provenance information for the most recent materialization of an asset.

    Args:
        asset_key (AssetKey): Key of the asset for which to retrieve provenance.

    Returns:
        Optional[DataProvenance]: Provenance information for the most recent
            materialization of the asset. Returns `None` if the asset was never
            materialized or the materialization record is too old to contain
            provenance information.
    """
    record = self.instance.get_latest_data_version_record(asset_key)
    if record is None:
        return None
    return extract_data_provenance_from_entry(record.event_log_entry)
def set_data_version(self, asset_key: AssetKey, data_version: DataVersion) -> None:
    """Set the data version for an asset being materialized by the currently executing step.

    This is useful for external execution situations where it is not possible
    to return an `Output`.

    Args:
        asset_key (AssetKey): Key of the asset for which to set the data version.
        data_version (DataVersion): The data version to set.
    """
    step_ctx = self._step_execution_context
    step_ctx.set_data_version(asset_key, data_version)
# In this mode no conversion is done on returned values and missing but expected outputs are not
# allowed.
@property
def requires_typed_event_stream(self) -> bool:
    """bool: Whether this context operates in typed-event-stream mode (see note above the property)."""
    return self._step_execution_context.requires_typed_event_stream
@property
def typed_event_stream_error_message(self) -> Optional[str]:
    """Optional[str]: The configured typed-event-stream error message, if any."""
    return self._step_execution_context.typed_event_stream_error_message
def set_requires_typed_event_stream(self, *, error_message: Optional[str] = None) -> None:
    """Mark this context as requiring a typed event stream.

    Args:
        error_message (Optional[str]): Message forwarded to the underlying step
            execution context.
    """
    step_ctx = self._step_execution_context
    step_ctx.set_requires_typed_event_stream(error_message=error_message)
@staticmethod
def get() -> "OpExecutionContext":
    """Return the OpExecutionContext currently in scope.

    Raises:
        DagsterInvariantViolationError: If no execution context is in scope.
    """
    from dagster._core.execution.context.compute import current_execution_context

    current = current_execution_context.get()
    if current is None:
        raise DagsterInvariantViolationError("No current OpExecutionContext in scope.")
    return current.op_execution_context
def load_asset_value(
    self,
    asset_key: AssetKey,
    *,
    python_type: Optional[type] = None,
    partition_key: Optional[str] = None,
) -> Any:
    """Load the value of an asset by key.

    Args:
        asset_key (AssetKey): The key of the asset to load.
        python_type (Optional[type]): The python type to load the asset as. This is what will
            be returned inside `load_input` by `context.dagster_type.typing_type`.
        partition_key (Optional[str]): The partition of the asset to load.

    Example:
        .. code-block:: python

            @dg.asset(deps=[dg.AssetKey("upstream_asset")])
            def my_asset(context: dg.AssetExecutionContext):
                return context.load_asset_value(dg.AssetKey("upstream_asset")) * 2
    """
    # Imported lazily to avoid a module-level import cycle.
    from dagster._core.storage.asset_value_loader import AssetValueLoader

    asset_layer = self._step_execution_context.job_def.asset_layer
    loader = AssetValueLoader(
        assets_defs_by_key={asset_key: asset_layer.get(asset_key).assets_def},
        instance=self.instance,
    )
    return loader.load_asset_value(
        asset_key,
        python_type=python_type,
        partition_key=partition_key,
    )
| OpExecutionContext |
python | kamyu104__LeetCode-Solutions | Python/find-the-number-of-k-even-arrays.py | {
"start": 82,
"end": 1412
class Solution(object):
    def countOfArrays(self, n, m, k):
        """Count length-n arrays over [1, m] with exactly k "even" adjacent pairs, mod 1e9+7.

        :type n: int
        :type m: int
        :type k: int
        :rtype: int
        """
        MOD = 10**9 + 7
        fact, inv, inv_fact = [[1] * 2 for _ in range(3)]

        def nCr(n, k):
            # Lazily extend the factorial / inverse-factorial tables.
            while len(inv) <= n:
                fact.append(fact[-1] * len(inv) % MOD)
                # https://cp-algorithms.com/algebra/module-inverse.html
                inv.append(inv[MOD % len(inv)] * (MOD - MOD // len(inv)) % MOD)
                inv_fact.append(inv_fact[-1] * inv[-1] % MOD)
            return (fact[n] * inv_fact[n - k] % MOD) * inv_fact[k] % MOD

        def nHr(n, k):
            # Stars-and-bars: multisets of size k from n choices.
            return nCr(n + k - 1, k)

        lookup = collections.defaultdict(list)

        def pow(a, b):
            # Memoized powers of a (all exponents up to b cached).
            while len(lookup[a]) <= b:
                lookup[a].append((lookup[a][-1] * a) % MOD if lookup[a] else 1)
            return lookup[a][b]

        even, odd = m // 2, (m + 1) // 2
        result = 0
        if k == 0:
            result = (result + pow(odd, n)) % MOD
        # since (n-(k+x))-((x+1)-2) >= 0, we have x <= (n+1-k)//2
        for x in range(1, (n + 1 - k) // 2 + 1):
            result = (
                result
                + nHr(x, (k + x) - x)
                * nHr(x + 1, (n - (k + x)) - ((x + 1) - 2))
                * pow(even, k + x)
                * pow(odd, n - (k + x))
                % MOD
            ) % MOD
        return result
# Time: O(n * k)
# Space: O(k)
# dp
| Solution |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 22957,
"end": 24042
} | class ____(AbstractTemplate):
def generic(self, args, kws):
# Resolution of members for records
record, idx = args
if isinstance(record, types.Record):
if isinstance(idx, types.StringLiteral):
if idx.literal_value not in record.fields:
msg = (f"Field '{idx.literal_value}' was not found in "
f"record with fields {tuple(record.fields.keys())}")
raise NumbaKeyError(msg)
ret = record.typeof(idx.literal_value)
assert ret
return signature(ret, *args)
elif isinstance(idx, types.IntegerLiteral):
if idx.literal_value >= len(record.fields):
msg = f"Requested index {idx.literal_value} is out of range"
raise NumbaIndexError(msg)
field_names = list(record.fields)
ret = record.typeof(field_names[idx.literal_value])
assert ret
return signature(ret, *args)
@infer
| StaticGetItemLiteralRecord |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/utils_v1/export_output.py | {
"start": 8127,
"end": 13287
} | class ____(ExportOutput):
"""Represents the output of a supervised training or eval process."""
__metaclass__ = abc.ABCMeta
LOSS_NAME = 'loss'
PREDICTIONS_NAME = 'predictions'
METRICS_NAME = 'metrics'
METRIC_VALUE_SUFFIX = 'value'
METRIC_UPDATE_SUFFIX = 'update_op'
_loss = None
_predictions = None
_metrics = None
def __init__(self, loss=None, predictions=None, metrics=None):
"""Constructor for SupervisedOutput (ie, Train or Eval output).
Args:
loss: dict of Tensors or single Tensor representing calculated loss.
predictions: dict of Tensors or single Tensor representing model
predictions.
metrics: Dict of metric results keyed by name.
The values of the dict can be one of the following:
(1) instance of `Metric` class.
(2) (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Raises:
ValueError: if any of the outputs' dict keys are not strings or tuples of
strings or the values are not Tensors (or Operations in the case of
update_op).
"""
if loss is not None:
loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)
self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)
if predictions is not None:
pred_dict = self._wrap_and_check_outputs(
predictions, self.PREDICTIONS_NAME)
self._predictions = self._prefix_output_keys(
pred_dict, self.PREDICTIONS_NAME)
if metrics is not None:
self._metrics = self._wrap_and_check_metrics(metrics)
def _prefix_output_keys(self, output_dict, output_name):
"""Prepend output_name to the output_dict keys if it doesn't exist.
This produces predictable prefixes for the pre-determined outputs
of SupervisedOutput.
Args:
output_dict: dict of string to Tensor, assumed valid.
output_name: prefix string to prepend to existing keys.
Returns:
dict with updated keys and existing values.
"""
new_outputs = {}
for key, val in output_dict.items():
key = self._prefix_key(key, output_name)
new_outputs[key] = val
return new_outputs
def _prefix_key(self, key, output_name):
if key.find(output_name) != 0:
key = output_name + self._SEPARATOR_CHAR + key
return key
def _wrap_and_check_metrics(self, metrics):
"""Handle the saving of metrics.
Metrics is either a tuple of (value, update_op), or a dict of such tuples.
Here, we separate out the tuples and create a dict with names to tensors.
Args:
metrics: Dict of metric results keyed by name.
The values of the dict can be one of the following:
(1) instance of `Metric` class.
(2) (metric_value, update_op) tuples, or a single tuple.
metric_value must be a Tensor, and update_op must be a Tensor or Op.
Returns:
dict of output_names to tensors
Raises:
ValueError: if the dict key is not a string, or the metric values or ops
are not tensors.
"""
if not isinstance(metrics, dict):
metrics = {self.METRICS_NAME: metrics}
outputs = {}
for key, value in metrics.items():
if isinstance(value, tuple):
metric_val, metric_op = value
else: # value is a keras.Metrics object
metric_val = value.result()
assert len(value.updates) == 1 # We expect only one update op.
metric_op = value.updates[0]
key = self._check_output_key(key, self.METRICS_NAME)
key = self._prefix_key(key, self.METRICS_NAME)
val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
if not isinstance(metric_val, tensor.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
key, metric_val))
if not (tensor_util.is_tensor(metric_op) or
isinstance(metric_op, ops.Operation)):
raise ValueError(
'{} update_op must be a Tensor or Operation; got {}.'.format(
key, metric_op))
# We must wrap any ops (or variables) in a Tensor before export, as the
# SignatureDef proto expects tensors only. See b/109740581
metric_op_tensor = metric_op
if not isinstance(metric_op, tensor.Tensor):
with ops.control_dependencies([metric_op]):
metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
outputs[val_name] = metric_val
outputs[op_name] = metric_op_tensor
return outputs
@property
def loss(self):
return self._loss
@property
def predictions(self):
return self._predictions
@property
def metrics(self):
return self._metrics
@abc.abstractmethod
def _get_signature_def_fn(self):
"""Returns a function that produces a SignatureDef given desired outputs."""
pass
def as_signature_def(self, receiver_tensors):
signature_def_fn = self._get_signature_def_fn()
return signature_def_fn(
receiver_tensors, self.loss, self.predictions, self.metrics)
| _SupervisedOutput |
python | getsentry__sentry | src/sentry/api/endpoints/organization_releases.py | {
"start": 6859,
"end": 10777
} | class ____(ReleaseWithVersionSerializer):
projects = ListField()
headCommits = ListField(
child=ReleaseHeadCommitSerializerDeprecated(), required=False, allow_null=False
)
refs = ListField(child=ReleaseHeadCommitSerializer(), required=False, allow_null=False)
@sentry_sdk.trace
def debounce_update_release_health_data(organization, project_ids: list[int]):
"""This causes a flush of snuba health data to the postgres tables once
per minute for the given projects.
"""
# Figure out which projects need to get updates from the snuba.
should_update = {}
cache_keys = ["debounce-health:%d" % id for id in project_ids]
cache_data = cache.get_many(cache_keys)
for project_id, cache_key in zip(project_ids, cache_keys):
if cache_data.get(cache_key) is None:
should_update[project_id] = cache_key
if not should_update:
return
projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())}
# This gives us updates for all release-projects which have seen new
# health data over the last days. It will miss releases where the last
# date is longer than what `get_changed_project_release_model_adoptions`
# considers recent.
project_releases = release_health.backend.get_changed_project_release_model_adoptions(
should_update.keys()
)
# Pre-flight query which was broken out of the release-project query below. By running this
# in a separate query we can utilize the index on (organization, version) and remove a join.
# The total cost of the two queries is significantly less than a single query.
release_ids_and_versions = dict(
Release.objects.filter(
organization_id=organization.id,
version__in=[x[1] for x in project_releases],
).values_list("id", "version")
)
release_ids_and_project_ids = list(
ReleaseProject.objects.filter(
project_id__in=[x[0] for x in project_releases],
release_id__in=release_ids_and_versions.keys(),
).values_list("release_id", "project_id")
)
# I'm zipping the results of the two queries above to emulate the results of the old query
# which was removed. I'm not changing the existing semantics of the code. I'm only performance
# optimizing database access. Feel free to change.
existing = {
(project_id, release_ids_and_versions[release_id])
for release_id, project_id in release_ids_and_project_ids
}
to_upsert = []
for key in project_releases:
if key not in existing:
to_upsert.append(key)
if to_upsert:
dates = release_health.backend.get_oldest_health_data_for_releases(to_upsert)
for project_id, version in to_upsert:
project = projects.get(project_id)
if project is None:
# should not happen
continue
# Ignore versions that were saved with an empty string before validation was added
if not Release.is_valid_version(version):
continue
# We might have never observed the release. This for instance can
# happen if the release only had health data so far. For these cases
# we want to create the release the first time we observed it on the
# health side.
release = Release.get_or_create(
project=project, version=version, date_added=dates.get((project_id, version))
)
# Make sure that the release knows about this project. Like we had before
# the project might not have been associated with this release yet.
release.add_project(project)
# Debounce updates for a minute
cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60)
@region_silo_endpoint
| ReleaseSerializerWithProjects |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 1476,
"end": 1574
} | class ____(signedinteger):
name = "int16"
typecode = "h"
torch_dtype = torch.int16
| int16 |
python | PyCQA__pylint | tests/functional/m/method_hidden.py | {
"start": 429,
"end": 481
} | class ____:
def abcd(self):
pass
| AbcdMixin |
python | allegroai__clearml | clearml/backend_api/services/v2_23/workers.py | {
"start": 53854,
"end": 60435
} | class ____(Response):
"""
Response of workers.get_all endpoint.
:param workers:
:type workers: Sequence[Worker]
"""
_service = "workers"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"current_task_entry": {
"properties": {
"id": {"description": "Worker ID", "type": ["string", "null"]},
"last_iteration": {
"description": "Last task iteration",
"type": ["integer", "null"],
},
"name": {"description": "Worker name", "type": ["string", "null"]},
"running_time": {
"description": "Task running time",
"type": ["integer", "null"],
},
},
"type": "object",
},
"id_name_entry": {
"properties": {
"id": {"description": "Worker ID", "type": ["string", "null"]},
"name": {"description": "Worker name", "type": ["string", "null"]},
},
"type": "object",
},
"queue_entry": {
"properties": {
"id": {"description": "Worker ID", "type": ["string", "null"]},
"name": {"description": "Worker name", "type": ["string", "null"]},
"next_task": {
"description": "Next task in the queue",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"num_tasks": {
"description": "Number of task entries in the queue",
"type": ["integer", "null"],
},
},
"type": "object",
},
"worker": {
"properties": {
"company": {
"description": "Associated company",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"id": {"description": "Worker ID", "type": ["string", "null"]},
"ip": {
"description": "IP of the worker",
"type": ["string", "null"],
},
"key": {
"description": "Worker entry key",
"type": ["string", "null"],
},
"last_activity_time": {
"description": "Last activity time (even if an error occurred)",
"format": "date-time",
"type": ["string", "null"],
},
"last_report_time": {
"description": "Last successful report time",
"format": "date-time",
"type": ["string", "null"],
},
"project": {
"description": "Project in which currently executing task resides",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
"queue": {
"description": "Queue from which running task was taken",
"oneOf": [
{"$ref": "#/definitions/queue_entry"},
{"type": "null"},
],
},
"queues": {
"description": "List of queues on which the worker is listening",
"items": {"$ref": "#/definitions/queue_entry"},
"type": ["array", "null"],
},
"register_time": {
"description": "Registration time",
"format": "date-time",
"type": ["string", "null"],
},
"system_tags": {
"description": "System tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task currently being run by the worker",
"oneOf": [
{"$ref": "#/definitions/current_task_entry"},
{"type": "null"},
],
},
"user": {
"description": "Associated user (under whose credentials are used by the worker daemon)",
"oneOf": [
{"$ref": "#/definitions/id_name_entry"},
{"type": "null"},
],
},
},
"type": "object",
},
},
"properties": {
"workers": {
"items": {"$ref": "#/definitions/worker"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, workers: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.workers = workers
@schema_property("workers")
def workers(self) -> Optional[List[Any]]:
return self._property_workers
@workers.setter
def workers(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_workers = None
return
self.assert_isinstance(value, "workers", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Worker.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "workers", Worker, is_array=True)
self._property_workers = value
| GetAllResponse |
python | PyCQA__pylint | tests/functional/d/dataclass/dataclass_with_default_factory.py | {
"start": 308,
"end": 552
} | class ____:
"""A test dataclass with a field, that has a default_factory."""
test: list = field(default_factory=list)
TEST = Test()
TEST.test.append(1)
print(TEST.test[0])
@dc.dataclass # Note the use of dc instead of dataclasses
| Test |
python | ray-project__ray | doc/source/serve/doc_code/getting_started/model_deployment_full.py | {
"start": 273,
"end": 1147
} | class ____:
def __init__(self):
# Load model
self.model = pipeline("translation_en_to_fr", model="t5-small")
def translate(self, text: str) -> str:
# Run inference
model_output = self.model(text)
# Post-process output to return only the translation text
translation = model_output[0]["translation_text"]
return translation
async def __call__(self, http_request: Request) -> str:
english_text: str = await http_request.json()
return self.translate(english_text)
translator_app = Translator.bind()
# __deployment_full_end__
translator_app = Translator.options(ray_actor_options={}).bind()
serve.run(translator_app)
import requests
response = requests.post("http://127.0.0.1:8000/", json="Hello world!").text
assert response == "Bonjour monde!"
serve.shutdown()
ray.shutdown()
| Translator |
python | getsentry__sentry | src/sentry/hybridcloud/models/outbox.py | {
"start": 16531,
"end": 17152
} | class ____(OutboxBase):
def send_signal(self) -> None:
process_region_outbox.send(
sender=OutboxCategory(self.category),
payload=self.payload,
object_identifier=self.object_identifier,
shard_identifier=self.shard_identifier,
shard_scope=self.shard_scope,
)
sharding_columns = ("shard_scope", "shard_identifier")
coalesced_columns = ("shard_scope", "shard_identifier", "category", "object_identifier")
class Meta:
abstract = True
__repr__ = sane_repr("payload", *coalesced_columns)
@region_silo_model
| RegionOutboxBase |
python | pandas-dev__pandas | asv_bench/benchmarks/reshape.py | {
"start": 4040,
"end": 4644
} | class ____:
def setup(self):
NUM_ROWS = 1000
self.df = DataFrame(
{
"A": np.random.randint(50, size=NUM_ROWS),
"B": np.random.randint(50, size=NUM_ROWS),
"C": np.random.randint(-10, 10, size=NUM_ROWS),
"D": np.random.randint(-10, 10, size=NUM_ROWS),
"E": np.random.randint(10, size=NUM_ROWS),
"F": np.random.randn(NUM_ROWS),
}
)
self.df = self.df.set_index(["A", "B", "C", "D", "E"])
def time_unstack(self):
self.df.unstack()
| SparseIndex |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 1107,
"end": 1892
} | class ____(TestCase):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
assert_(common_type(ai32) == np.float64)
assert_(common_type(af16) == np.float16)
assert_(common_type(af32) == np.float32)
assert_(common_type(af64) == np.float64)
assert_(common_type(acs) == np.csingle)
assert_(common_type(acd) == np.cdouble)
@xfail # (reason="not implemented")
| TestCommonType |
python | spack__spack | lib/spack/spack/solver/requirements.py | {
"start": 1020,
"end": 2632
} | class ____(NamedTuple):
"""Data class to collect information on a requirement"""
pkg_name: str
policy: str
origin: RequirementOrigin
requirements: Sequence[spack.spec.Spec]
condition: spack.spec.Spec
kind: RequirementKind
message: Optional[str]
def preference(
pkg_name: str,
constraint: spack.spec.Spec,
condition: spack.spec.Spec = spack.spec.Spec(),
origin: RequirementOrigin = RequirementOrigin.PREFER_YAML,
kind: RequirementKind = RequirementKind.PACKAGE,
message: Optional[str] = None,
) -> RequirementRule:
"""Returns a preference rule"""
# A strong preference is defined as:
#
# require:
# - any_of: [spec_str, "@:"]
return RequirementRule(
pkg_name=pkg_name,
policy="any_of",
requirements=[constraint, spack.spec.Spec("@:")],
kind=kind,
condition=condition,
origin=origin,
message=message,
)
def conflict(
pkg_name: str,
constraint: spack.spec.Spec,
condition: spack.spec.Spec = spack.spec.Spec(),
origin: RequirementOrigin = RequirementOrigin.CONFLICT_YAML,
kind: RequirementKind = RequirementKind.PACKAGE,
message: Optional[str] = None,
) -> RequirementRule:
"""Returns a conflict rule"""
# A conflict is defined as:
#
# require:
# - one_of: [spec_str, "@:"]
return RequirementRule(
pkg_name=pkg_name,
policy="one_of",
requirements=[constraint, spack.spec.Spec("@:")],
kind=kind,
condition=condition,
origin=origin,
message=message,
)
| RequirementRule |
python | numpy__numpy | numpy/distutils/npy_pkg_config.py | {
"start": 451,
"end": 1857
} | class ____(OSError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
flags = (' ' + line).split(' -')
for flag in flags:
flag = '-' + flag
if len(flag) > 0:
if flag.startswith('-I'):
d['include_dirs'].append(flag[2:].strip())
elif flag.startswith('-L'):
d['library_dirs'].append(flag[2:].strip())
elif flag.startswith('-l'):
d['libraries'].append(flag[2:].strip())
elif flag.startswith('-D'):
d['macros'].append(flag[2:].strip())
else:
d['ignored'].append(flag)
return d
def _escape_backslash(val):
return val.replace('\\', '\\\\')
| PkgNotFound |
python | walkccc__LeetCode | solutions/3068. Find the Maximum Sum of Node Values/3068.py | {
"start": 0,
"end": 384
} | class ____:
def maximumValueSum(
self,
nums: list[int],
k: int,
edges: list[list[int]],
) -> int:
maxSum = sum(max(num, num ^ k) for num in nums)
changedCount = sum((num ^ k) > num for num in nums)
if changedCount % 2 == 0:
return maxSum
minChangeDiff = min(abs(num - (num ^ k)) for num in nums)
return maxSum - minChangeDiff
| Solution |
python | numba__numba | numba/core/typing/templates.py | {
"start": 11182,
"end": 12935
} | class ____(FunctionTemplate):
"""
Defines method ``generic(self, args, kws)`` which compute a possible
signature base on input types. The signature does not have to match the
input types. It is compared against the input types afterwards.
"""
def apply(self, args, kws):
generic = getattr(self, "generic")
sig = generic(args, kws)
# Enforce that *generic()* must return None or Signature
if sig is not None:
if not isinstance(sig, Signature):
raise AssertionError(
"generic() must return a Signature or None. "
"{} returned {}".format(generic, type(sig)),
)
# Unpack optional type if no matching signature
if not sig and any(isinstance(x, types.Optional) for x in args):
def unpack_opt(x):
if isinstance(x, types.Optional):
return x.type
else:
return x
args = list(map(unpack_opt, args))
assert not kws # Not supported yet
sig = generic(args, kws)
return sig
def get_template_info(self):
impl = getattr(self, "generic")
basepath = os.path.dirname(os.path.dirname(numba.__file__))
code, firstlineno, path = self.get_source_code_info(impl)
sig = str(utils.pysignature(impl))
info = {
'kind': "overload",
'name': getattr(impl, '__qualname__', impl.__name__),
'sig': sig,
'filename': utils.safe_relpath(path, start=basepath),
'lines': (firstlineno, firstlineno + len(code) - 1),
'docstring': impl.__doc__
}
return info
| AbstractTemplate |
python | urllib3__urllib3 | src/urllib3/poolmanager.py | {
"start": 5384,
"end": 18453
} | class ____(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example:
.. code-block:: python
import urllib3
http = urllib3.PoolManager(num_pools=2)
resp1 = http.request("GET", "https://google.com/")
resp2 = http.request("GET", "https://google.com/mail")
resp3 = http.request("GET", "https://yahoo.com/")
print(len(http.pools))
# 2
"""
proxy: Url | None = None
proxy_config: ProxyConfig | None = None
def __init__(
self,
num_pools: int = 10,
headers: typing.Mapping[str, str] | None = None,
**connection_pool_kw: typing.Any,
) -> None:
super().__init__(headers)
# PoolManager handles redirects itself in PoolManager.urlopen().
# It always passes redirect=False to the underlying connection pool to
# suppress per-pool redirect handling. If the user supplied a non-Retry
# value (int/bool/etc) for retries and we let the pool normalize it
# while redirect=False, the resulting Retry object would have redirect
# handling disabled, which can interfere with PoolManager's own
# redirect logic. Normalize here so redirects remain governed solely by
# PoolManager logic.
if "retries" in connection_pool_kw:
retries = connection_pool_kw["retries"]
if not isinstance(retries, Retry):
retries = Retry.from_int(retries)
connection_pool_kw = connection_pool_kw.copy()
connection_pool_kw["retries"] = retries
self.connection_pool_kw = connection_pool_kw
self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool]
self.pools = RecentlyUsedContainer(num_pools)
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> typing.Literal[False]:
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(
self,
scheme: str,
host: str,
port: int,
request_context: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls: type[HTTPConnectionPool] = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Default blocksize to _DEFAULT_BLOCKSIZE if missing or explicitly
# set to 'None' in the request_context.
if request_context.get("blocksize") is None:
request_context["blocksize"] = _DEFAULT_BLOCKSIZE
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self) -> None:
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(
self,
host: str | None,
port: int | None = None,
scheme: str | None = "http",
pool_kwargs: dict[str, typing.Any] | None = None,
) -> HTTPConnectionPool:
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context)
def connection_from_context(
self, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
if "strict" in request_context:
warnings.warn(
"The 'strict' parameter is no longer needed on Python 3+. "
"This will raise an error in urllib3 v2.1.0.",
DeprecationWarning,
)
request_context.pop("strict")
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(
self, pool_key: PoolKey, request_context: dict[str, typing.Any]
) -> HTTPConnectionPool:
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool
def connection_from_url(
self, url: str, pool_kwargs: dict[str, typing.Any] | None = None
) -> HTTPConnectionPool:
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(
self, override: dict[str, typing.Any] | None
) -> dict[str, typing.Any]:
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
def _proxy_requires_url_absolute_form(self, parsed_url: Url) -> bool:
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
def urlopen( # type: ignore[override]
self, method: str, url: str, redirect: bool = True, **kw: typing.Any
) -> BaseHTTPResponse:
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
if u.scheme is None:
warnings.warn(
"URLs without a scheme (ie 'https://') are deprecated and will raise an error "
"in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
"start with 'https://' or 'http://'. Read more in this issue: "
"https://github.com/urllib3/urllib3/issues/2920",
category=DeprecationWarning,
stacklevel=2,
)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
# Change the method according to RFC 9110, Section 15.4.4.
method = "GET"
# And lose the body not to transfer anything sensitive.
kw["body"] = None
kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change()
retries = kw.get("retries", response.retries)
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
new_headers = kw["headers"].copy()
for header in kw["headers"]:
if header.lower() in retries.remove_headers_on_redirect:
new_headers.pop(header, None)
kw["headers"] = new_headers
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
| PoolManager |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-integers-to-choose-from-a-range-i.py | {
"start": 1592,
"end": 2049
} | class ____(object):
def maxCount(self, banned, n, maxSum):
"""
:type banned: List[int]
:type n: int
:type maxSum: int
:rtype: int
"""
lookup = set(banned)
result = total = 0
for i in xrange(1, n+1):
if i in lookup:
continue
if total+i > maxSum:
break
total += i
result += 1
return result
| Solution3 |
python | doocs__leetcode | solution/2500-2599/2525.Categorize Box According to Criteria/Solution2.py | {
"start": 0,
"end": 415
} | class ____:
def categorizeBox(self, length: int, width: int, height: int, mass: int) -> str:
v = length * width * height
bulky = any(x >= 10000 for x in (length, width, height)) or v >= 10**9
heavy = mass >= 100
if bulky and heavy:
return "Both"
if bulky:
return "Bulky"
if heavy:
return "Heavy"
return "Neither"
| Solution |
python | pytorch__pytorch | torch/multiprocessing/queue.py | {
"start": 769,
"end": 1123
} | class ____(multiprocessing.queues.Queue):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
self._send = self._writer.send
self._recv = self._reader.recv
| Queue |
python | pytorch__pytorch | torch/onnx/_internal/fx/passes/type_promotion.py | {
"start": 61425,
"end": 64728
} | class ____(_pass.Transform):
"""Explicitly insert type promotion ops to the graph.
Underneath, the main pass is driven by `_TypePromotionInterpreter`, which is a subclass
of `torch.fx.Interpreter` to interpret the fx.Graph and perform the insertion of type
promotion operations.
By re-running the new and modified nodes using the interpreter, we can update the
metadata, specifically the fake tensor stored under node.meta["val"], and ensure it
reflects the latest changes.
"""
def __init__(
self,
module: torch.fx.GraphModule,
type_promotion_table: TypePromotionTable | None = None,
) -> None:
super().__init__(module)
self.interpreter = _TypePromotionInterpreter(
module, type_promotion_table or TypePromotionTable()
)
def _fetch_fake_args(
self,
) -> Sequence[
fake_tensor.FakeTensor
| float
| int
| bool
| torch.SymInt
| torch.SymFloat
| torch.SymBool
| None
]:
"""Fetch fake args from fx graph.
For each argument, try to fetch fake tensor from the matching placeholder node.
"""
fake_args = []
for node in self.module.graph.nodes:
if node.op == "placeholder":
try:
# Meta value can be torch.Tensor, int, float, bool,
# torch.SymInt, torch.SymFloat, torch.SymBool.
meta_value = _val = node.meta.get("val", None)
except RuntimeError as e:
if not node.users:
# If the placeholder is not used, we can safely ignore it and put
# None as placeholder.
meta_value = None
else:
raise RuntimeError(
"Cannot fetch symbolic fake args from fx graph. "
"InsertTypePromotion pass needs to run with pre-existing fake args, "
"Otherwise the pass will produce inaccurate dynamic shape. "
) from e
fake_args.append(meta_value)
return fake_args
def _run(self, *args, **kwargs) -> torch.fx.GraphModule:
assert not args, (
"`InsertTypePromotion` deduces symbolic fake arguments from the graph. "
"It does not accept concrete arguments as input because this pass requires "
"re-running the graph. When executed with newly faked concrete arguments, "
"the pass loses the symbolic dynamic shape information."
)
assert not kwargs, "`kwargs` is not supported"
fake_args = self._fetch_fake_args()
fake_mode = self.fake_mode
assert fake_mode is not None, "Cannot detect fake_mode."
# Use the python dispatcher to run through some python kernels which
# can better handle symints. Without this, some SymInts can become static
# when there are dynamic shapes.
dispatcher_mode = torch._dispatch.python.enable_python_dispatcher()
with fake_mode, dispatcher_mode, fx_traceback.preserve_node_meta():
self.interpreter.run(*fake_args)
return self.module
| InsertTypePromotion |
python | google__jax | jax/_src/pallas/cost_estimate.py | {
"start": 1374,
"end": 1854
} | class ____:
flops: int
transcendentals: int
bytes_accessed: int
def __add__(self, other: 'CostEstimate') -> 'CostEstimate':
return CostEstimate(
flops=self.flops + other.flops,
transcendentals=self.transcendentals + other.transcendentals,
bytes_accessed=self.bytes_accessed + other.bytes_accessed,
)
def register_cost_rule(primitive: jax_core.Primitive, rule):
_cost_rules[primitive] = rule
@dataclasses.dataclass(frozen=True)
| CostEstimate |
python | getsentry__sentry | src/sentry/utils/session_store.py | {
"start": 178,
"end": 3836
} | class ____:
"""
RedisSessionStore provides a convenience object, which when initialized will
store attributes assigned to it into redis. The redis key is stored into
the request session. Useful for storing data too large to be stored into
the session cookie.
The attributes to be backed by Redis must be declared in a subclass using
the `redis_property` function. Do not instantiate RedisSessionStore without
extending it to add properties. For example:
>>> class HotDogSessionStore(RedisSessionStore):
>>> bun = redis_property("bun")
>>> condiment = redis_property("condiment")
NOTE: Assigning attributes immediately saves their value back into the
redis key assigned for this store. Be aware of the multiple
round-trips implication of this.
NOTE: This object is subject to race conditions on updating valeus as the
entire object value is stored in one redis key.
>>> store = RedisSessionStore(request, 'store-name')
>>> store.regenerate()
>>> store.some_value = 'my value'
The value will be available across requests as long as the same same store
name is used.
>>> store.some_value
'my value'
The store may be destroyed before it expires using the ``clear`` method.
>>> store.clear()
It's important to note that the session store will expire if values are not
modified within the provided ttl.
"""
redis_namespace = "session-cache"
def __init__(self, request, prefix, ttl=EXPIRATION_TTL):
self.request = request
self.prefix = prefix
self.ttl = ttl
@property
def _client(self):
return redis.redis_clusters.get(settings.SENTRY_SESSION_STORE_REDIS_CLUSTER)
@property
def session_key(self) -> str:
return f"store:{self.prefix}"
@property
def redis_key(self):
return self.request.session.get(self.session_key)
def mark_session(self):
# Subclasses may override to mark session as modified
pass
def regenerate(self, initial_state=None):
if initial_state is None:
initial_state = {}
redis_key = f"{self.redis_namespace}:{self.prefix}:{uuid4().hex}"
self.request.session[self.session_key] = redis_key
self.mark_session()
value = dumps(initial_state)
self._client.setex(redis_key, self.ttl, value)
def clear(self):
if not self.redis_key:
return
self._client.delete(self.redis_key)
session = self.request.session
del session[self.session_key]
self.mark_session()
def is_valid(self):
return bool(self.redis_key and self.get_state() is not None)
def get_state(self):
if not self.redis_key:
return None
state_json = self._client.get(self.redis_key)
if not state_json:
return None
try:
return loads(state_json)
except Exception as e:
sentry_sdk.capture_exception(e)
return None
def redis_property(key: str):
"""Declare a property backed by Redis on a RedisSessionStore class."""
def getter(store: "RedisSessionStore"):
state = store.get_state()
try:
return state[key] if state else None
except KeyError as e:
raise AttributeError(e)
def setter(store: "RedisSessionStore", value):
state = store.get_state()
if state is None:
return
state[key] = value
store._client.setex(store.redis_key, store.ttl, dumps(state))
return property(getter, setter)
| RedisSessionStore |
python | astropy__astropy | astropy/cosmology/_src/tests/test_realizations.py | {
"start": 817,
"end": 3589
} | class ____:
"""Tests for :class:`~astropy.cosmology.realizations.default_cosmology`."""
# -----------------------------------------------------
# Get
def test_get_current(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` current value."""
cosmo = default_cosmology.get()
assert cosmo is default_cosmology.validate(default_cosmology._value)
# -----------------------------------------------------
# Validate
def test_validate_fail(self):
"""Test :meth:`astropy.cosmology.default_cosmology.validate`."""
# bad input type
with pytest.raises(TypeError, match="must be a string or Cosmology"):
default_cosmology.validate(TypeError)
# a not-valid option, but still a str
with pytest.raises(ValueError, match="Unknown cosmology"):
default_cosmology.validate("fail!")
# a not-valid type
with pytest.raises(TypeError, match="cannot find a Cosmology"):
default_cosmology.validate("available")
def test_validate_default(self):
"""Test method ``validate`` for specific values."""
value = default_cosmology.validate(None)
assert value is realizations.Planck18
@pytest.mark.parametrize("name", parameters.available)
def test_validate_str(self, name):
"""Test method ``validate`` for string input."""
value = default_cosmology.validate(name)
assert value is getattr(realizations, name)
@pytest.mark.parametrize("name", parameters.available)
def test_validate_cosmo(self, name):
"""Test method ``validate`` for cosmology instance input."""
cosmo = getattr(realizations, name)
value = default_cosmology.validate(cosmo)
assert value is cosmo
def test_validate_no_default(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` to `None`."""
cosmo = default_cosmology.validate("no_default")
assert cosmo is None
@pytest.mark.parametrize("name", parameters.available)
def test_pickle_builtin_realizations(name, pickle_protocol):
"""
Test in-built realizations can pickle and unpickle.
Also a regression test for #12008.
"""
# get class instance
original = getattr(cosmology, name)
# pickle and unpickle
f = pickle.dumps(original, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta == original.meta
# if the units are not enabled, it isn't equal because redshift units
# are not equal. This is a weird, known issue.
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta != original.meta
| Test_default_cosmology |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 15336,
"end": 15504
} | class ____(models.Model):
country = models.ForeignKey(
Country, on_delete=models.CASCADE, db_column="countryCode"
)
history = HistoricalRecords()
| City |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias4.py | {
"start": 1553,
"end": 1890
} | class ____:
my_type1: TA = int
def func1():
# This should generate an error because type aliases are allowed
# only in classes or modules.
my_type1: TA = int
_Obj = cast(type[object], object)
# This should generate an error because _Obj is a variable,
# which isn't allowed in a TypeAlias statement.
Obj: TA = _Obj
| ClassB |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 129936,
"end": 130199
} | class ____(BaseModel, extra="forbid"):
"""
Sparse vector structure
"""
indices: List[int] = Field(..., description="Indices must be unique")
values: List[float] = Field(..., description="Values and indices must be the same length")
| SparseVector |
python | imageio__imageio | imageio/plugins/_swf.py | {
"start": 9840,
"end": 10333
} | class ____(ControlTag):
"""Set the color in 0-255, or 0-1 (if floats given)."""
def __init__(self, *rgb):
self.tagtype = 9
if len(rgb) == 1:
rgb = rgb[0]
self.rgb = rgb
def process_tag(self):
bb = bytes()
for i in range(3):
clr = self.rgb[i]
if isinstance(clr, float): # pragma: no cover - not used
clr = clr * 255
bb += int2uint8(clr)
self.bytes = bb
| SetBackgroundTag |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 10979,
"end": 20230
} | class ____(RoleBase):
alias_permissions: List[AliasPermissionOutput]
cluster_permissions: List[ClusterPermissionOutput]
collections_permissions: List[CollectionsPermissionOutput]
data_permissions: List[DataPermissionOutput]
roles_permissions: List[RolesPermissionOutput]
users_permissions: List[UsersPermissionOutput]
backups_permissions: List[BackupsPermissionOutput]
nodes_permissions: List[NodesPermissionOutput]
tenants_permissions: List[TenantsPermissionOutput]
replicate_permissions: List[ReplicatePermissionOutput]
groups_permissions: List[GroupsPermissionOutput]
@property
def permissions(self) -> List[PermissionsOutputType]:
permissions: List[PermissionsOutputType] = []
permissions.extend(self.alias_permissions)
permissions.extend(self.cluster_permissions)
permissions.extend(self.collections_permissions)
permissions.extend(self.data_permissions)
permissions.extend(self.roles_permissions)
permissions.extend(self.users_permissions)
permissions.extend(self.backups_permissions)
permissions.extend(self.nodes_permissions)
permissions.extend(self.tenants_permissions)
permissions.extend(self.replicate_permissions)
permissions.extend(self.groups_permissions)
return permissions
@classmethod
def _from_weaviate_role(cls, role: WeaviateRole) -> "Role":
alias_permissions: List[AliasPermissionOutput] = []
cluster_permissions: List[ClusterPermissionOutput] = []
users_permissions: List[UsersPermissionOutput] = []
collections_permissions: List[CollectionsPermissionOutput] = []
roles_permissions: List[RolesPermissionOutput] = []
data_permissions: List[DataPermissionOutput] = []
backups_permissions: List[BackupsPermissionOutput] = []
nodes_permissions: List[NodesPermissionOutput] = []
tenants_permissions: List[TenantsPermissionOutput] = []
replicate_permissions: List[ReplicatePermissionOutput] = []
groups_permissions: List[GroupsPermissionOutput] = []
for permission in role["permissions"]:
if permission["action"] in ClusterAction.values():
cluster_permissions.append(
ClusterPermissionOutput(actions={ClusterAction(permission["action"])})
)
elif permission["action"] in UsersAction.values():
users = permission.get("users")
if users is not None:
users_permissions.append(
UsersPermissionOutput(
actions={UsersAction(permission["action"])},
users=users["users"],
)
)
elif permission["action"] in CollectionsAction.values():
collections = permission.get("collections")
if collections is not None:
collections_permissions.append(
CollectionsPermissionOutput(
collection=collections["collection"],
actions={CollectionsAction(permission["action"])},
)
)
elif permission["action"] in TenantsAction.values():
tenants = permission.get("tenants")
if tenants is not None:
tenants_permissions.append(
TenantsPermissionOutput(
collection=tenants["collection"],
tenant=tenants.get("tenant", "*"),
actions={TenantsAction(permission["action"])},
)
)
elif permission["action"] in RolesAction.values():
roles = permission.get("roles")
if roles is not None:
scope = roles.get("scope")
roles_permissions.append(
RolesPermissionOutput(
role=roles["role"],
actions={RolesAction(permission["action"])},
scope=RoleScope(scope) if scope else None,
)
)
elif permission["action"] in DataAction.values():
data = permission.get("data")
if data is not None:
data_permissions.append(
DataPermissionOutput(
collection=data["collection"],
tenant=data.get("tenant", "*"),
actions={DataAction(permission["action"])},
)
)
elif permission["action"] in BackupsAction.values():
backups = permission.get("backups")
if backups is not None:
backups_permissions.append(
BackupsPermissionOutput(
collection=backups["collection"],
actions={BackupsAction(permission["action"])},
)
)
elif permission["action"] in NodesAction.values():
nodes = permission.get("nodes")
if nodes is not None:
nodes_permissions.append(
NodesPermissionOutput(
collection=nodes.get("collection", "*"),
verbosity=nodes["verbosity"],
actions={NodesAction(permission["action"])},
)
)
elif permission["action"] in ReplicateAction.values():
replicate = permission.get("replicate")
if replicate is not None:
replicate_permissions.append(
ReplicatePermissionOutput(
collection=replicate["collection"],
shard=replicate.get("shard", "*"),
actions={ReplicateAction(permission["action"])},
)
)
elif permission["action"] in AliasAction.values():
aliases = permission.get("aliases")
if aliases is not None:
alias_permissions.append(
AliasPermissionOutput(
alias=aliases["alias"],
collection=aliases["collection"],
actions={AliasAction(permission["action"])},
)
)
elif permission["action"] in GroupAction.values():
groups = permission.get("groups")
if groups is not None:
groups_permissions.append(
GroupsPermissionOutput(
group=groups["group"],
group_type=groups["groupType"],
actions={GroupAction(permission["action"])},
)
)
else:
_Warnings.unknown_permission_encountered(permission)
return cls(
name=role["name"],
alias_permissions=_join_permissions(alias_permissions),
cluster_permissions=_join_permissions(cluster_permissions),
users_permissions=_join_permissions(users_permissions),
collections_permissions=_join_permissions(collections_permissions),
roles_permissions=_join_permissions(roles_permissions),
groups_permissions=_join_permissions(groups_permissions),
data_permissions=_join_permissions(data_permissions),
backups_permissions=_join_permissions(backups_permissions),
nodes_permissions=_join_permissions(nodes_permissions),
tenants_permissions=_join_permissions(tenants_permissions),
replicate_permissions=_join_permissions(replicate_permissions),
)
T = TypeVar("T", bound=_Permission)
def _join_permissions(permissions: List[T]) -> List[T]:
# permissions with the same resource can be combined and then have multiple actions
unified: Dict[str, int] = {}
for i, perm in enumerate(permissions):
resource = ""
for field in perm.model_fields_set:
if (
field == "actions"
): # action is the one field that is not part of the resource and which we want to combine
continue
resource += field + str(getattr(perm, field)) + "#"
if resource in unified:
permissions[unified[resource]].actions.add(perm.actions.pop())
else:
unified[resource] = i
return_permission: List[T] = []
for i in unified.values():
return_permission.append(permissions[i])
return return_permission
ActionsType = Union[_Action, Sequence[_Action]]
PermissionsInputType = Union[
_Permission,
Sequence[_Permission],
Sequence[Sequence[_Permission]],
Sequence[Union[_Permission, Sequence[_Permission]]],
]
PermissionsCreateType = List[_Permission]
| Role |
python | kamyu104__LeetCode-Solutions | Python/rotate-string.py | {
"start": 1146,
"end": 2394
} | class ____(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
"""
def strStr(haystack, needle):
def KMP(text, pattern):
prefix = getPrefix(pattern)
j = -1
for i in xrange(len(text)):
while j > -1 and pattern[j + 1] != text[i]:
j = prefix[j]
if pattern[j + 1] == text[i]:
j += 1
if j == len(pattern) - 1:
return i - j
return -1
def getPrefix(pattern):
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j + 1] != pattern[i]:
j = prefix[j]
if pattern[j + 1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
if not needle:
return 0
return KMP(haystack, needle)
if len(A) != len(B):
return False
return strStr(A*2, B) != -1
# Time: O(n^2)
# Space: O(n)
| Solution2 |
python | apache__airflow | providers/google/tests/unit/google/common/hooks/test_base_google.py | {
"start": 42506,
"end": 43804
} | class ____:
"""Test get_field function and _get_field method handle False and other falsy values correctly."""
def test_get_field_returns_false_not_none(self):
"""Test that get_field correctly returns False instead of None."""
extras = {"use_legacy_sql": False}
result = hook.get_field(extras, "use_legacy_sql")
assert result is False
def test_get_field_returns_false_with_prefixed_name(self):
"""Test that get_field correctly returns False with prefixed field name."""
extras = {"extra__google_cloud_platform__use_legacy_sql": False}
result = hook.get_field(extras, "use_legacy_sql")
assert result is False
@mock.patch("airflow.providers.common.compat.sdk.BaseHook.get_connection")
def test_hook_get_field_returns_false_not_default(self, mock_get_connection):
"""Test that _get_field correctly returns False instead of default value."""
mock_connection = mock.MagicMock()
mock_connection.extra_dejson = {"use_legacy_sql": False}
mock_get_connection.return_value = mock_connection
hook_instance = hook.GoogleBaseHook(gcp_conn_id="test_conn")
result = hook_instance._get_field("use_legacy_sql", default=True)
assert result is False
| TestGetFieldWithFalseValues |
python | ethereum__web3.py | web3/_utils/module_testing/go_ethereum_admin_module.py | {
"start": 259,
"end": 1958
} | class ____:
def test_add_peer(self, w3: "Web3") -> None:
result = w3.geth.admin.add_peer(
EnodeURI(
"enode://f1a6b0bdbf014355587c3018454d070ac57801f05d3b39fe85da574f002a32e929f683d72aa5a8318382e4d3c7a05c9b91687b0d997a39619fb8a6e7ad88e512@1.1.1.1:30303" # noqa: E501
),
)
assert result is True
def test_admin_datadir(self, w3: "Web3", datadir: str) -> None:
result = w3.geth.admin.datadir()
assert result == datadir
def test_admin_node_info(self, w3: "Web3") -> None:
result = w3.geth.admin.node_info()
expected = AttributeDict(
{
"id": "",
"name": "",
"enode": "",
"ip": "",
"ports": AttributeDict({}),
"listenAddr": "",
"protocols": AttributeDict({}),
}
)
# Test that result gives at least the keys that are listed in `expected`
assert not set(expected.keys()).difference(result.keys())
def test_admin_peers(self, w3: "Web3") -> None:
enode = w3.geth.admin.node_info()["enode"]
w3.geth.admin.add_peer(enode)
result = w3.geth.admin.peers()
assert len(result) == 1
def test_admin_start_stop_http(self, w3: "Web3") -> None:
stop = w3.geth.admin.stop_http()
assert stop is True
start = w3.geth.admin.start_http()
assert start is True
def test_admin_start_stop_ws(self, w3: "Web3") -> None:
stop = w3.geth.admin.stop_ws()
assert stop is True
start = w3.geth.admin.start_ws()
assert start is True
| GoEthereumAdminModuleTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1022683,
"end": 1044115
} | class ____(
sgqlc.types.Type,
Node,
Actor,
PackageOwner,
ProjectOwner,
ProjectNextOwner,
ProjectV2Owner,
ProjectV2Recent,
RepositoryDiscussionAuthor,
RepositoryDiscussionCommentAuthor,
RepositoryOwner,
UniformResourceLocatable,
ProfileOwner,
Sponsorable,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"bio",
"bio_html",
"can_receive_organization_emails_when_notifications_restricted",
"commit_comments",
"company",
"company_html",
"contributions_collection",
"created_at",
"database_id",
"followers",
"following",
"gist",
"gist_comments",
"gists",
"hovercard",
"interaction_ability",
"is_bounty_hunter",
"is_campus_expert",
"is_developer_program_member",
"is_employee",
"is_following_viewer",
"is_git_hub_star",
"is_hireable",
"is_site_admin",
"is_viewer",
"issue_comments",
"issues",
"organization",
"organization_verified_domain_emails",
"organizations",
"public_keys",
"pull_requests",
"repositories_contributed_to",
"saved_replies",
"starred_repositories",
"status",
"top_repositories",
"twitter_username",
"updated_at",
"viewer_can_follow",
"viewer_is_following",
"watching",
)
bio = sgqlc.types.Field(String, graphql_name="bio")
bio_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="bioHTML")
can_receive_organization_emails_when_notifications_restricted = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="canReceiveOrganizationEmailsWhenNotificationsRestricted",
args=sgqlc.types.ArgDict(
(
(
"login",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="login", default=None
),
),
)
),
)
commit_comments = sgqlc.types.Field(
sgqlc.types.non_null(CommitCommentConnection),
graphql_name="commitComments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
company = sgqlc.types.Field(String, graphql_name="company")
company_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML), graphql_name="companyHTML"
)
contributions_collection = sgqlc.types.Field(
sgqlc.types.non_null(ContributionsCollection),
graphql_name="contributionsCollection",
args=sgqlc.types.ArgDict(
(
(
"organization_id",
sgqlc.types.Arg(ID, graphql_name="organizationID", default=None),
),
("from_", sgqlc.types.Arg(DateTime, graphql_name="from", default=None)),
("to", sgqlc.types.Arg(DateTime, graphql_name="to", default=None)),
)
),
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
followers = sgqlc.types.Field(
sgqlc.types.non_null(FollowerConnection),
graphql_name="followers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
following = sgqlc.types.Field(
sgqlc.types.non_null(FollowingConnection),
graphql_name="following",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
gist = sgqlc.types.Field(
Gist,
graphql_name="gist",
args=sgqlc.types.ArgDict(
(
(
"name",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="name", default=None
),
),
)
),
)
gist_comments = sgqlc.types.Field(
sgqlc.types.non_null(GistCommentConnection),
graphql_name="gistComments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
gists = sgqlc.types.Field(
sgqlc.types.non_null(GistConnection),
graphql_name="gists",
args=sgqlc.types.ArgDict(
(
(
"privacy",
sgqlc.types.Arg(GistPrivacy, graphql_name="privacy", default=None),
),
(
"order_by",
sgqlc.types.Arg(GistOrder, graphql_name="orderBy", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
hovercard = sgqlc.types.Field(
sgqlc.types.non_null(Hovercard),
graphql_name="hovercard",
args=sgqlc.types.ArgDict(
(
(
"primary_subject_id",
sgqlc.types.Arg(ID, graphql_name="primarySubjectId", default=None),
),
)
),
)
interaction_ability = sgqlc.types.Field(
RepositoryInteractionAbility, graphql_name="interactionAbility"
)
is_bounty_hunter = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isBountyHunter"
)
is_campus_expert = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isCampusExpert"
)
is_developer_program_member = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isDeveloperProgramMember"
)
is_employee = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isEmployee"
)
is_following_viewer = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isFollowingViewer"
)
is_git_hub_star = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isGitHubStar"
)
is_hireable = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isHireable"
)
is_site_admin = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isSiteAdmin"
)
is_viewer = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isViewer"
)
issue_comments = sgqlc.types.Field(
sgqlc.types.non_null(IssueCommentConnection),
graphql_name="issueComments",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(
IssueCommentOrder, graphql_name="orderBy", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
issues = sgqlc.types.Field(
sgqlc.types.non_null(IssueConnection),
graphql_name="issues",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None),
),
(
"labels",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="labels",
default=None,
),
),
(
"states",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(IssueState)),
graphql_name="states",
default=None,
),
),
(
"filter_by",
sgqlc.types.Arg(
IssueFilters, graphql_name="filterBy", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
organization = sgqlc.types.Field(
Organization,
graphql_name="organization",
args=sgqlc.types.ArgDict(
(
(
"login",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="login", default=None
),
),
)
),
)
organization_verified_domain_emails = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))),
graphql_name="organizationVerifiedDomainEmails",
args=sgqlc.types.ArgDict(
(
(
"login",
sgqlc.types.Arg(
sgqlc.types.non_null(String), graphql_name="login", default=None
),
),
)
),
)
organizations = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationConnection),
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
public_keys = sgqlc.types.Field(
sgqlc.types.non_null(PublicKeyConnection),
graphql_name="publicKeys",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
pull_requests = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestConnection),
graphql_name="pullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)),
graphql_name="states",
default=None,
),
),
(
"labels",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="labels",
default=None,
),
),
(
"head_ref_name",
sgqlc.types.Arg(String, graphql_name="headRefName", default=None),
),
(
"base_ref_name",
sgqlc.types.Arg(String, graphql_name="baseRefName", default=None),
),
(
"order_by",
sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
repositories_contributed_to = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="repositoriesContributedTo",
args=sgqlc.types.ArgDict(
(
(
"privacy",
sgqlc.types.Arg(
RepositoryPrivacy, graphql_name="privacy", default=None
),
),
(
"order_by",
sgqlc.types.Arg(
RepositoryOrder, graphql_name="orderBy", default=None
),
),
(
"is_locked",
sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None),
),
(
"include_user_repositories",
sgqlc.types.Arg(
Boolean, graphql_name="includeUserRepositories", default=None
),
),
(
"contribution_types",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryContributionType),
graphql_name="contributionTypes",
default=None,
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
saved_replies = sgqlc.types.Field(
SavedReplyConnection,
graphql_name="savedReplies",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
SavedReplyOrder,
graphql_name="orderBy",
default={"field": "UPDATED_AT", "direction": "DESC"},
),
),
)
),
)
starred_repositories = sgqlc.types.Field(
sgqlc.types.non_null(StarredRepositoryConnection),
graphql_name="starredRepositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"owned_by_viewer",
sgqlc.types.Arg(
Boolean, graphql_name="ownedByViewer", default=None
),
),
(
"order_by",
sgqlc.types.Arg(StarOrder, graphql_name="orderBy", default=None),
),
)
),
)
status = sgqlc.types.Field("UserStatus", graphql_name="status")
top_repositories = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="topRepositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
sgqlc.types.non_null(RepositoryOrder),
graphql_name="orderBy",
default=None,
),
),
(
"since",
sgqlc.types.Arg(DateTime, graphql_name="since", default=None),
),
)
),
)
twitter_username = sgqlc.types.Field(String, graphql_name="twitterUsername")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
viewer_can_follow = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanFollow"
)
viewer_is_following = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerIsFollowing"
)
watching = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="watching",
args=sgqlc.types.ArgDict(
(
(
"privacy",
sgqlc.types.Arg(
RepositoryPrivacy, graphql_name="privacy", default=None
),
),
(
"order_by",
sgqlc.types.Arg(
RepositoryOrder, graphql_name="orderBy", default=None
),
),
(
"affiliations",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryAffiliation),
graphql_name="affiliations",
default=None,
),
),
(
"owner_affiliations",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryAffiliation),
graphql_name="ownerAffiliations",
default=("OWNER", "COLLABORATOR"),
),
),
(
"is_locked",
sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| User |
python | plotly__plotly.py | plotly/graph_objs/bar/marker/colorbar/_title.py | {
"start": 233,
"end": 3992
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar.marker.colorbar"
_path_str = "bar.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.bar.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.marker.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | ApeWorX__ape | src/ape/managers/project.py | {
"start": 20827,
"end": 36029
} | class ____(BaseManager, ExtraAttributesMixin):
"""
A wrapper around a dependency.
Users will not create this class directly but access
them from ``project.dependencies``.
"""
def __init__(self, api: DependencyAPI, project: Optional["ProjectManager"] = None):
self.api = api
# This is the base project using this dependency.
self.base_project = project or self.local_project
# When installed (and set, lazily), this is the dependency project.
self._installation: Optional[ProjectManager] = None
self._tried_fetch = False
@log_instead_of_fail(default="<Dependency>")
def __repr__(self) -> str:
return repr(self.api)
def __hash__(self):
return hash(f"{self.package_id}@{self.version}")
@only_raise_attribute_error
def __getattr__(self, name: str) -> Any:
return get_attribute_with_extras(self, name)
def __ape_extra_attributes__(self) -> Iterator[ExtraModelAttributes]:
yield ExtraModelAttributes(name="project", attributes=lambda: self.project)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Dependency):
# We can't handle this type.
# This line causes python to retry from the other end.
return NotImplemented
return self.package_id == other.package_id and self.version == other.version
@property
def name(self) -> str:
"""
The short-name of the dependency, used for remappings.
"""
return self.api.name
@property
def package_id(self) -> str:
"""
The long-name of the dependency, used as an ID.
"""
return self.api.package_id
@property
def clean_package_id(self) -> str:
"""
The package ID hiding the user information.
"""
return self.api.package_id.replace(f"{Path.home()}", "$HOME")
@property
def version(self) -> str:
"""
The version of the dependency. Combined with the
package_id, you have a full identifier of the package.
"""
return self.api.version_id
@property
def project(self) -> "ProjectManager":
"""
The "project" of the dependency, use like any other
project in Ape (compile and interact with its contracts).
"""
return self.install()
@property
def _cache(self) -> "PackagesCache":
return self.base_project.dependencies.packages_cache
@property
def project_path(self) -> Path:
"""
The path to the dependency's project root. When installing, this
is where the project files go.
"""
return self._cache.get_project_path(self.package_id, self.version)
@property
def _project_disk_cache_exists(self) -> bool:
path = self._cache.get_project_path(self.package_id, self.version)
if not path.is_dir():
return False
# Ensure there are actually project files in here.
return len([x for x in self.project_path.iterdir() if not x.name.startswith(".")]) > 0
@property
def manifest_path(self) -> Path:
"""
The path to the dependency's manifest. When compiling, the artifacts go here.
"""
return self._cache.get_manifest_path(self.package_id, self.version)
@property
def api_path(self) -> Path:
"""
The path to the dependency's API data-file. This data is necessary
for managing the installation of the dependency.
"""
return self._cache.get_api_path(self.package_id, self.version)
@property
def installed(self) -> bool:
"""
``True`` when a project is available. Note: Installed does not mean
the dependency is compiled!
"""
if self._installation is not None:
return True
try:
project_path = self.project_path
except ProjectError:
# Fails when version ID errors out (bad config / missing required install etc.)
return False
if project_path.is_dir():
if any(x for x in self.project_path.iterdir() if not x.name.startswith(".")):
return True
return False
@property
def compiled(self) -> bool:
"""
True if installed and compiled.
"""
return self.installed and self.project.is_compiled
@property
def uri(self) -> str:
"""
The dependency's URI for refreshing.
"""
return self.api.uri
def install(
self,
use_cache: bool = True,
config_override: Optional[dict] = None,
recurse: bool = True,
) -> "ProjectManager":
"""
Install this dependency.
Args:
use_cache (bool): To force reinstalling, like a refresh, set this
to ``False``.
config_override (dict): Optionally change the configuration during install.
recurse (bool): Set to ``False`` to avoid installing dependency of dependencies.
Returns:
:class:`~ape.managers.project.ProjectManager`: The resulting project, ready
for compiling.
"""
config_override = {
**(self.api.config_override or {}),
**(config_override or {}),
}
project = None
did_fetch = False
# Check and used already installed project if we can.
if self._installation is not None and use_cache:
# Already has a cached installation.
if config_override:
self._installation.reconfigure(**config_override)
return self._installation
elif not self._project_disk_cache_exists or not use_cache:
# Project does not yet exist in the cache. We have to fetch the sources.
unpacked = False
if use_cache and self.manifest_path.is_file():
# Attempt using sources from manifest. This may happen
# if having deleted dependencies but not their manifests.
man = PackageManifest.model_validate_json(
self.manifest_path.read_text(encoding="utf8")
)
if man.sources:
self.project_path.mkdir(parents=True, exist_ok=True)
man.unpack_sources(self.project_path)
unpacked = True
# Either never fetched, it is missing but present in manifest, or we are forcing.
if not unpacked and not self._tried_fetch:
logger.debug(f"Fetching {self.api.package_id} {self.api.version_id}")
# No sources found! Fetch the project.
shutil.rmtree(self.project_path, ignore_errors=True)
self.project_path.parent.mkdir(parents=True, exist_ok=True)
self._tried_fetch = True
logger.info(f"Installing {self.clean_package_id} {self.api.version_id}")
try:
self.api.fetch(self.project_path)
except Exception as err:
raise ProjectError(f"Fetching failed: {err}")
did_fetch = True
# Reset global tried-fetch if it succeeded, so it can refresh.
self._tried_fetch = False
# Set name / version for the project, if it needs.
if "name" not in config_override:
config_override["name"] = self.api.name
if "version" not in config_override:
config_override["version"] = self.api.version_id
if self.project_path.is_dir():
paths = get_all_files_in_directory(self.project_path)
# Check if given only a manifest.
if len(paths) == 1:
suffix = get_full_extension(paths[0])
if suffix == ".json":
path = paths[0]
try:
manifest = PackageManifest.model_validate_json(
path.read_text(encoding="utf8")
)
except Exception:
# False alarm.
pass
else:
# Using a manifest project, unless this is just emptiness.
if (
manifest.sources
or manifest.contract_types
or manifest.name
or manifest.version
):
project = Project.from_manifest(
manifest, config_override=config_override
)
if project is None:
# Using an unpacked local-project.
project = LocalProject(
self.project_path,
manifest_path=self.manifest_path,
config_override=config_override,
)
elif self.manifest_path.is_file():
# Manifest-only project with manifest populated and not project-dir.
project = Project.from_manifest(self.manifest_path, config_override=config_override)
else:
raise ProjectError("Project install failed.")
# Cache for next time.
self._installation = project
# Install dependencies of dependencies if fetching for the first time.
if did_fetch and recurse:
spec = project.dependencies.get_project_dependencies(use_cache=use_cache)
list(spec)
return project
def uninstall(self):
self._cache.remove(self.package_id, self.version)
self._installation = None
def compile(
self,
use_cache: bool = True,
config_override: Optional[dict] = None,
allow_install: bool = False,
) -> dict[str, ContractContainer]:
"""
Compile a dependency.
Args:
use_cache (bool): Set to ``False`` to force a re-compile.
config_override (Optional[dict]): Optionally override the configuration,
which may be needed for compiling.
allow_install (bool): Set to ``True`` to allow installing.
Returns:
dict[str, :class:`~ape.contracts.ContractContainer`]
"""
override = {**self.api.config_override, **(config_override or {})}
self.api.config_override = override
if not self.installed and allow_install:
project = self.install()
else:
# Will raise if not installed and allow_install=False.
project = self.project
if override:
# Ensure is using most up-to-date config override.
project.reconfigure(**override)
self._cache.cache_api(self.api)
if result := project.load_contracts(use_cache=use_cache):
return result
# Failed to get any contract types out of the dependency project.
# Try to figure out the best reason as to why this happened.
contracts_folder = project.contracts_folder
message = "Compiling dependency produced no contract types."
if isinstance(project, LocalProject):
all_files = [x.name for x in get_all_files_in_directory(contracts_folder)]
has_solidity_sources = any(get_full_extension(Path(x)) == ".sol" for x in all_files)
has_vyper_sources = any(
get_full_extension(Path(x)) in (".vy", ".vyi") for x in all_files
)
compilers = self.compiler_manager.registered_compilers
warn_sol = has_solidity_sources and ".sol" not in compilers
warn_vyper = has_vyper_sources and ".vy" not in compilers
suffix = ""
if warn_sol:
suffix = "Try installing 'ape-solidity'"
if warn_vyper:
suffix += " or 'ape-vyper'"
elif warn_vyper:
suffix = "Try installing 'ape-vyper'"
elif len(all_files) == 0:
suffix = f"No source files found! (contracts_folder={clean_path(contracts_folder)})"
if suffix:
message = f"{message} {suffix}."
logger.warning(message)
return {}
def unpack(self, path: Path) -> Iterator["Dependency"]:
"""
Move dependencies into a .cache folder. Also unpacks
dependencies of dependencies. Ideal for tmp-projects.
Args:
path (Path): The destination where to unpack sources.
Returns:
Iterates over every dependency unpacked, so the user
knows the dependencies of dependencies.
"""
yield from self._unpack(path, set())
def _unpack(self, path: Path, tracked: set[str]) -> Iterator["Dependency"]:
key = self.package_id
if key in tracked:
return
tracked.add(key)
# NOTE: Don't do the same weird path-ify thing for
# the in-contracts .cache folder. Short names work here.
folder = path / self.name / self.version
if not folder.is_dir():
# Not yet unpacked.
if isinstance(self.project, LocalProject):
contracts_folder_id = get_relative_path(
self.project.contracts_folder, self.project.path
)
destination = folder / contracts_folder_id
destination.parent.mkdir(parents=True, exist_ok=True)
if self.project.contracts_folder.is_dir():
shutil.copytree(self.project.contracts_folder, destination)
else:
# Will create contracts folder from source IDs.
folder.parent.mkdir(parents=True, exist_ok=True)
self.project.manifest.unpack_sources(folder)
# self is done!
yield self
# Unpack dependencies of dependencies (if they aren't already).
for dependency in self.project.dependencies.specified:
for unpacked_dep in dependency._unpack(path, tracked=tracked):
yield unpacked_dep
def _get_cache_versions_suffix(package_id) -> Path:
package_id_name = package_id.replace("/", "_")
return Path(package_id_name)
def _get_cache_suffix(package_id: str, version: str, suffix: str = "") -> Path:
package_id_path = _get_cache_versions_suffix(package_id)
version_name = f"{version.replace('.', '_').replace('/', '_')}{suffix}"
return package_id_path / version_name
def _get_cache_path(
base_path: Path,
package_id: str,
version: str,
is_dir: bool = False,
suffix: str = "",
) -> Path:
options = _version_to_options(version)
original = None
for option in options:
path = base_path / _get_cache_suffix(package_id, option, suffix=suffix)
if original is None:
# The 'original' is the first option.
original = path
if (is_dir and path.is_dir()) or (not is_dir and path.is_file()):
return path
# Return original - may not be created yet!
assert original is not None # For mypy.
return original
| Dependency |
python | django__django | tests/test_runner/tests.py | {
"start": 21649,
"end": 23251
} | class ____(SimpleTestCase):
@mock.patch.object(multiprocessing, "get_start_method", return_value="spawn")
@mock.patch(
"django.test.runner.ParallelTestSuite.initialize_suite",
side_effect=Exception("initialize_suite() is called."),
)
def test_no_initialize_suite_test_runner(self, *mocked_objects):
"""
The test suite's initialize_suite() method must always be called when
using spawn. It cannot rely on a test runner implementation.
"""
class NoInitializeSuiteTestRunner(DiscoverRunner):
def setup_test_environment(self, **kwargs):
return
def setup_databases(self, **kwargs):
return
def run_checks(self, databases):
return
def teardown_databases(self, old_config, **kwargs):
return
def teardown_test_environment(self, **kwargs):
return
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
with self.assertRaisesMessage(Exception, "initialize_suite() is called."):
runner = NoInitializeSuiteTestRunner(
verbosity=0, interactive=False, parallel=2
)
runner.run_tests(
[
"test_runner_apps.sample.tests_sample.TestDjangoTestCase",
"test_runner_apps.simple.tests",
]
)
| NoInitializeSuiteTestRunnerTests |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 39930,
"end": 40846
} | class ____(FlowRunStateChangeAction):
"""Changes the state of a flow run associated with the trigger"""
type: Literal["change-flow-run-state"] = "change-flow-run-state"
name: Optional[str] = Field(
None,
description="The name of the state to change the flow run to",
)
state: StateType = Field(
...,
description="The type of the state to change the flow run to",
)
message: Optional[str] = Field(
None,
description="An optional message to associate with the state change",
)
async def new_state(self, triggered_action: "TriggeredAction") -> StateCreate:
message = (
self.message
or f"State changed by Automation {triggered_action.automation.id}"
)
return StateCreate(
name=self.name,
type=self.state,
message=message,
)
| ChangeFlowRunState |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/command_parser_test.py | {
"start": 10861,
"end": 12613
} | class ____(test_util.TensorFlowTestCase):
INF_VALUE = sys.float_info.max
def testParseEmptyRangeString(self):
self.assertEqual([], command_parser.parse_ranges(""))
self.assertEqual([], command_parser.parse_ranges(" "))
def testParseSingleRange(self):
self.assertAllClose([[-0.1, 0.2]],
command_parser.parse_ranges("[-0.1, 0.2]"))
self.assertAllClose([[-0.1, self.INF_VALUE]],
command_parser.parse_ranges("[-0.1, inf]"))
self.assertAllClose([[-self.INF_VALUE, self.INF_VALUE]],
command_parser.parse_ranges("[-inf, inf]"))
def testParseSingleListOfRanges(self):
self.assertAllClose([[-0.1, 0.2], [10.0, 12.0]],
command_parser.parse_ranges("[[-0.1, 0.2], [10, 12]]"))
self.assertAllClose(
[[-self.INF_VALUE, -1.0], [1.0, self.INF_VALUE]],
command_parser.parse_ranges("[[-inf, -1.0],[1.0, inf]]"))
def testParseInvalidRangeString(self):
with self.assertRaises(SyntaxError):
command_parser.parse_ranges("[[1,2]")
with self.assertRaisesRegex(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[1,2,3]")
with self.assertRaisesRegex(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[inf]")
with self.assertRaisesRegex(ValueError,
"Incorrect type in the 1st element of range"):
command_parser.parse_ranges("[1j, 1]")
with self.assertRaisesRegex(ValueError,
"Incorrect type in the 2nd element of range"):
command_parser.parse_ranges("[1, 1j]")
| ParseRangesTest |
python | viewflow__viewflow | viewflow/workflow/nodes/start.py | {
"start": 1368,
"end": 1955
} | class ____(StartActivation):
@Activation.status.transition(
source=STATUS.DONE,
target=STATUS.CANCELED,
conditions=[leading_tasks_canceled],
permission=has_manage_permission,
)
def undo(self):
# undo
if self.flow_task._undo_func is not None:
self.flow_task._undo_func(self)
# cancel process
self.process.finished = now()
self.process.status = PROCESS.CANCELED
self.process.save()
# finish task
self.task.finished = now()
self.task.save()
| StartHandleActivation |
python | faif__python-patterns | tests/creational/test_prototype.py | {
"start": 92,
"end": 949
} | class ____(unittest.TestCase):
def setUp(self):
self.prototype = Prototype()
def test_cloning_propperty_innate_values(self):
sample_object_1 = self.prototype.clone()
sample_object_2 = self.prototype.clone()
self.assertEqual(sample_object_1.value, sample_object_2.value)
def test_extended_property_values_cloning(self):
sample_object_1 = self.prototype.clone()
sample_object_1.some_value = "test string"
sample_object_2 = self.prototype.clone()
self.assertRaises(AttributeError, lambda: sample_object_2.some_value)
def test_cloning_propperty_assigned_values(self):
sample_object_1 = self.prototype.clone()
sample_object_2 = self.prototype.clone(value="re-assigned")
self.assertNotEqual(sample_object_1.value, sample_object_2.value)
| TestPrototypeFeatures |
python | python__mypy | mypy/checker.py | {
"start": 377844,
"end": 392623
} | class ____(TypeTraverserVisitor):
"""Collects the non-nested argument types in a set."""
def __init__(self) -> None:
self.arg_types: set[TypeVarType] = set()
def visit_type_var(self, t: TypeVarType) -> None:
self.arg_types.add(t)
@overload
def conditional_types(
current_type: Type,
proposed_type_ranges: list[TypeRange] | None,
default: None = None,
*,
consider_runtime_isinstance: bool = True,
) -> tuple[Type | None, Type | None]: ...
@overload
def conditional_types(
current_type: Type,
proposed_type_ranges: list[TypeRange] | None,
default: Type,
*,
consider_runtime_isinstance: bool = True,
) -> tuple[Type, Type]: ...
def conditional_types(
current_type: Type,
proposed_type_ranges: list[TypeRange] | None,
default: Type | None = None,
*,
consider_runtime_isinstance: bool = True,
) -> tuple[Type | None, Type | None]:
"""Takes in the current type and a proposed type of an expression.
Returns a 2-tuple:
The first element is the proposed type, if the expression can be the proposed type.
(or default, if default is set and the expression is a subtype of the proposed type).
The second element is the type it would hold if it was not the proposed type, if any.
(or default, if default is set and the expression is not a subtype of the proposed type).
UninhabitedType means unreachable.
None means no new information can be inferred.
"""
if proposed_type_ranges is None:
# An isinstance check, but we don't understand the type
return current_type, default
if not proposed_type_ranges:
# This is the case for `if isinstance(x, ())` which always returns False.
return UninhabitedType(), default
if len(proposed_type_ranges) == 1:
# expand e.g. bool -> Literal[True] | Literal[False]
target = proposed_type_ranges[0].item
target = get_proper_type(target)
if isinstance(target, LiteralType) and (
target.is_enum_literal() or isinstance(target.value, bool)
):
enum_name = target.fallback.type.fullname
current_type = try_expanding_sum_type_to_union(current_type, enum_name)
proper_type = get_proper_type(current_type)
# factorize over union types: isinstance(A|B, C) -> yes = A_yes | B_yes
if isinstance(proper_type, UnionType):
result: list[tuple[Type | None, Type | None]] = [
conditional_types(
union_item,
proposed_type_ranges,
default=union_item,
consider_runtime_isinstance=consider_runtime_isinstance,
)
for union_item in get_proper_types(proper_type.items)
]
# separate list of tuples into two lists
yes_types, no_types = zip(*result)
proposed_type = make_simplified_union([t for t in yes_types if t is not None])
else:
proposed_items = [type_range.item for type_range in proposed_type_ranges]
proposed_type = make_simplified_union(proposed_items)
if isinstance(proper_type, AnyType):
return proposed_type, current_type
elif isinstance(proposed_type, AnyType):
# We don't really know much about the proposed type, so we shouldn't
# attempt to narrow anything. Instead, we broaden the expr to Any to
# avoid false positives
return proposed_type, default
elif not any(type_range.is_upper_bound for type_range in proposed_type_ranges) and (
# concrete subtypes
is_proper_subtype(current_type, proposed_type, ignore_promotions=True)
# structural subtypes
or (
(
isinstance(proposed_type, CallableType)
or (isinstance(proposed_type, Instance) and proposed_type.type.is_protocol)
)
and is_subtype(current_type, proposed_type, ignore_promotions=True)
)
):
# Expression is always of one of the types in proposed_type_ranges
return default, UninhabitedType()
elif not is_overlapping_types(current_type, proposed_type, ignore_promotions=True):
# Expression is never of any type in proposed_type_ranges
return UninhabitedType(), default
else:
# we can only restrict when the type is precise, not bounded
proposed_precise_type = UnionType.make_union(
[
type_range.item
for type_range in proposed_type_ranges
if not type_range.is_upper_bound
]
)
remaining_type = restrict_subtype_away(
current_type,
proposed_precise_type,
consider_runtime_isinstance=consider_runtime_isinstance,
)
return proposed_type, remaining_type
def conditional_types_to_typemaps(
expr: Expression, yes_type: Type | None, no_type: Type | None
) -> tuple[TypeMap, TypeMap]:
expr = collapse_walrus(expr)
maps: list[TypeMap] = []
for typ in (yes_type, no_type):
proper_type = get_proper_type(typ)
if isinstance(proper_type, UninhabitedType):
maps.append(None)
elif proper_type is None:
maps.append({})
else:
assert typ is not None
maps.append({expr: typ})
return cast(tuple[TypeMap, TypeMap], tuple(maps))
def gen_unique_name(base: str, table: SymbolTable) -> str:
"""Generate a name that does not appear in table by appending numbers to base."""
if base not in table:
return base
i = 1
while base + str(i) in table:
i += 1
return base + str(i)
def is_true_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'True' literal/keyword."""
return refers_to_fullname(n, "builtins.True") or isinstance(n, IntExpr) and n.value != 0
def is_false_literal(n: Expression) -> bool:
"""Returns true if this expression is the 'False' literal/keyword."""
return refers_to_fullname(n, "builtins.False") or isinstance(n, IntExpr) and n.value == 0
def is_literal_none(n: Expression) -> bool:
"""Returns true if this expression is the 'None' literal/keyword."""
return isinstance(n, NameExpr) and n.fullname == "builtins.None"
def is_literal_not_implemented(n: Expression | None) -> bool:
return isinstance(n, NameExpr) and n.fullname == "builtins.NotImplemented"
def _is_empty_generator_function(func: FuncItem) -> bool:
"""
Checks whether a function's body is 'return; yield' (the yield being added only
to promote the function into a generator function).
"""
body = func.body.body
return (
len(body) == 2
and isinstance(ret_stmt := body[0], ReturnStmt)
and (ret_stmt.expr is None or is_literal_none(ret_stmt.expr))
and isinstance(expr_stmt := body[1], ExpressionStmt)
and isinstance(yield_expr := expr_stmt.expr, YieldExpr)
and (yield_expr.expr is None or is_literal_none(yield_expr.expr))
)
def builtin_item_type(tp: Type) -> Type | None:
"""Get the item type of a builtin container.
If 'tp' is not one of the built containers (these includes NamedTuple and TypedDict)
or if the container is not parameterized (like List or List[Any])
return None. This function is used to narrow optional types in situations like this:
x: Optional[int]
if x in (1, 2, 3):
x + 42 # OK
Note: this is only OK for built-in containers, where we know the behavior
of __contains__.
"""
tp = get_proper_type(tp)
if isinstance(tp, Instance):
if tp.type.fullname in [
"builtins.list",
"builtins.tuple",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"_collections_abc.dict_keys",
"typing.KeysView",
]:
if not tp.args:
# TODO: fix tuple in lib-stub/builtins.pyi (it should be generic).
return None
if not isinstance(get_proper_type(tp.args[0]), AnyType):
return tp.args[0]
elif isinstance(tp, TupleType):
normalized_items = []
for it in tp.items:
# This use case is probably rare, but not handling unpacks here can cause crashes.
if isinstance(it, UnpackType):
unpacked = get_proper_type(it.type)
if isinstance(unpacked, TypeVarTupleType):
unpacked = get_proper_type(unpacked.upper_bound)
assert (
isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
)
normalized_items.append(unpacked.args[0])
else:
normalized_items.append(it)
if all(not isinstance(it, AnyType) for it in get_proper_types(normalized_items)):
return make_simplified_union(normalized_items) # this type is not externally visible
elif isinstance(tp, TypedDictType):
# TypedDict always has non-optional string keys. Find the key type from the Mapping
# base class.
for base in tp.fallback.type.mro:
if base.fullname == "typing.Mapping":
return map_instance_to_supertype(tp.fallback, base).args[0]
assert False, "No Mapping base class found for TypedDict fallback"
return None
def and_conditional_maps(m1: TypeMap, m2: TypeMap, use_meet: bool = False) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 and e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2.
"""
if m1 is None or m2 is None:
# One of the conditions can never be true.
return None
# Both conditions can be true; combine the information. Anything
# we learn from either conditions' truth is valid. If the same
# expression's type is refined by both conditions, we somewhat
# arbitrarily give precedence to m2 unless m1 value is Any.
# In the future, we could use an intersection type or meet_types().
result = m2.copy()
m2_keys = {literal_hash(n2) for n2 in m2}
for n1 in m1:
if literal_hash(n1) not in m2_keys or isinstance(get_proper_type(m1[n1]), AnyType):
result[n1] = m1[n1]
if use_meet:
# For now, meet common keys only if specifically requested.
# This is currently used for tuple types narrowing, where having
# a precise result is important.
for n1 in m1:
for n2 in m2:
if literal_hash(n1) == literal_hash(n2):
result[n1] = meet_types(m1[n1], m2[n2])
return result
def or_conditional_maps(m1: TypeMap, m2: TypeMap, coalesce_any: bool = False) -> TypeMap:
"""Calculate what information we can learn from the truth of (e1 or e2)
in terms of the information that we can learn from the truth of e1 and
the truth of e2. If coalesce_any is True, consider Any a supertype when
joining restrictions.
"""
if m1 is None:
return m2
if m2 is None:
return m1
# Both conditions can be true. Combine information about
# expressions whose type is refined by both conditions. (We do not
# learn anything about expressions whose type is refined by only
# one condition.)
result: dict[Expression, Type] = {}
for n1 in m1:
for n2 in m2:
if literal_hash(n1) == literal_hash(n2):
if coalesce_any and isinstance(get_proper_type(m1[n1]), AnyType):
result[n1] = m1[n1]
else:
result[n1] = make_simplified_union([m1[n1], m2[n2]])
return result
def reduce_conditional_maps(
type_maps: list[tuple[TypeMap, TypeMap]], use_meet: bool = False
) -> tuple[TypeMap, TypeMap]:
"""Reduces a list containing pairs of if/else TypeMaps into a single pair.
We "and" together all of the if TypeMaps and "or" together the else TypeMaps. So
for example, if we had the input:
[
({x: TypeIfX, shared: TypeIfShared1}, {x: TypeElseX, shared: TypeElseShared1}),
({y: TypeIfY, shared: TypeIfShared2}, {y: TypeElseY, shared: TypeElseShared2}),
]
...we'd return the output:
(
{x: TypeIfX, y: TypeIfY, shared: PseudoIntersection[TypeIfShared1, TypeIfShared2]},
{shared: Union[TypeElseShared1, TypeElseShared2]},
)
...where "PseudoIntersection[X, Y] == Y" because mypy actually doesn't understand intersections
yet, so we settle for just arbitrarily picking the right expr's type.
We only retain the shared expression in the 'else' case because we don't actually know
whether x was refined or y was refined -- only just that one of the two was refined.
"""
if len(type_maps) == 0:
return {}, {}
elif len(type_maps) == 1:
return type_maps[0]
else:
final_if_map, final_else_map = type_maps[0]
for if_map, else_map in type_maps[1:]:
final_if_map = and_conditional_maps(final_if_map, if_map, use_meet=use_meet)
final_else_map = or_conditional_maps(final_else_map, else_map)
return final_if_map, final_else_map
def convert_to_typetype(type_map: TypeMap) -> TypeMap:
converted_type_map: dict[Expression, Type] = {}
if type_map is None:
return None
for expr, typ in type_map.items():
t = typ
if isinstance(t, TypeVarType):
t = t.upper_bound
# TODO: should we only allow unions of instances as per PEP 484?
if not isinstance(get_proper_type(t), (UnionType, Instance, NoneType)):
# unknown type; error was likely reported earlier
return {}
converted_type_map[expr] = TypeType.make_normalized(typ)
return converted_type_map
def flatten(t: Expression) -> list[Expression]:
"""Flatten a nested sequence of tuples/lists into one list of nodes."""
if isinstance(t, (TupleExpr, ListExpr)):
return [b for a in t.items for b in flatten(a)]
elif isinstance(t, StarExpr):
return flatten(t.expr)
else:
return [t]
def flatten_types(t: Type) -> list[Type]:
"""Flatten a nested sequence of tuples into one list of nodes."""
t = get_proper_type(t)
if isinstance(t, TupleType):
return [b for a in t.items for b in flatten_types(a)]
elif is_named_instance(t, "builtins.tuple"):
return [t.args[0]]
else:
return [t]
def expand_func(defn: FuncItem, map: dict[TypeVarId, Type]) -> FuncItem:
visitor = TypeTransformVisitor(map)
ret = visitor.node(defn)
assert isinstance(ret, FuncItem)
return ret
| CollectArgTypeVarTypes |
python | mwaskom__seaborn | tests/test_distributions.py | {
"start": 10226,
"end": 28160
} | class ____(SharedAxesLevelTests):
func = staticmethod(kdeplot)
def get_last_color(self, ax, fill=True):
if fill:
return ax.collections[-1].get_facecolor()
else:
return ax.lines[-1].get_color()
@pytest.mark.parametrize("fill", [True, False])
def test_color(self, long_df, fill):
super().test_color(long_df, fill=fill)
if fill:
ax = plt.figure().subplots()
self.func(data=long_df, x="y", facecolor="C3", fill=True, ax=ax)
assert_colors_equal(self.get_last_color(ax), "C3", check_alpha=False)
ax = plt.figure().subplots()
self.func(data=long_df, x="y", fc="C4", fill=True, ax=ax)
assert_colors_equal(self.get_last_color(ax), "C4", check_alpha=False)
@pytest.mark.parametrize(
"variable", ["x", "y"],
)
def test_long_vectors(self, long_df, variable):
vector = long_df[variable]
vectors = [
variable, vector, vector.to_numpy(), vector.to_list(),
]
f, ax = plt.subplots()
for vector in vectors:
kdeplot(data=long_df, **{variable: vector})
xdata = [l.get_xdata() for l in ax.lines]
for a, b in itertools.product(xdata, xdata):
assert_array_equal(a, b)
ydata = [l.get_ydata() for l in ax.lines]
for a, b in itertools.product(ydata, ydata):
assert_array_equal(a, b)
def test_wide_vs_long_data(self, wide_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=wide_df, ax=ax1, common_norm=False, common_grid=False)
for col in wide_df:
kdeplot(data=wide_df, x=col, ax=ax2)
for l1, l2 in zip(ax1.lines[::-1], ax2.lines):
assert_array_equal(l1.get_xydata(), l2.get_xydata())
def test_flat_vector(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df["x"])
kdeplot(x=long_df["x"])
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_empty_data(self):
ax = kdeplot(x=[])
assert not ax.lines
def test_singular_data(self):
with pytest.warns(UserWarning):
ax = kdeplot(x=np.ones(10))
assert not ax.lines
with pytest.warns(UserWarning):
ax = kdeplot(x=[5])
assert not ax.lines
with pytest.warns(UserWarning):
# https://github.com/mwaskom/seaborn/issues/2762
ax = kdeplot(x=[1929245168.06679] * 18)
assert not ax.lines
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
ax = kdeplot(x=[5], warn_singular=False)
assert not ax.lines
def test_variable_assignment(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", fill=True)
kdeplot(data=long_df, y="x", fill=True)
v0 = ax.collections[0].get_paths()[0].vertices
v1 = ax.collections[1].get_paths()[0].vertices[:, [1, 0]]
assert_array_equal(v0, v1)
def test_vertical_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, y="x")
with pytest.warns(UserWarning):
kdeplot(data=long_df, x="x", vertical=True)
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_bw_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_method="silverman")
with pytest.warns(UserWarning):
kdeplot(data=long_df, x="x", bw="silverman")
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_kernel_deprecation(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x")
with pytest.warns(UserWarning):
kdeplot(data=long_df, x="x", kernel="epi")
assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())
def test_shade_deprecation(self, long_df):
f, ax = plt.subplots()
with pytest.warns(FutureWarning):
kdeplot(data=long_df, x="x", shade=True)
kdeplot(data=long_df, x="x", fill=True)
fill1, fill2 = ax.collections
assert_array_equal(
fill1.get_paths()[0].vertices, fill2.get_paths()[0].vertices
)
@pytest.mark.parametrize("multiple", ["layer", "stack", "fill"])
def test_hue_colors(self, long_df, multiple):
ax = kdeplot(
data=long_df, x="x", hue="a",
multiple=multiple,
fill=True, legend=False
)
# Note that hue order is reversed in the plot
lines = ax.lines[::-1]
fills = ax.collections[::-1]
palette = color_palette()
for line, fill, color in zip(lines, fills, palette):
assert_colors_equal(line.get_color(), color)
assert_colors_equal(fill.get_facecolor(), to_rgba(color, .25))
def test_hue_stacking(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="a",
multiple="layer", common_grid=True,
legend=False, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a",
multiple="stack", fill=False,
legend=False, ax=ax2,
)
layered_densities = np.stack([
l.get_ydata() for l in ax1.lines
])
stacked_densities = np.stack([
l.get_ydata() for l in ax2.lines
])
assert_array_equal(layered_densities.cumsum(axis=0), stacked_densities)
def test_hue_filling(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="a",
multiple="layer", common_grid=True,
legend=False, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a",
multiple="fill", fill=False,
legend=False, ax=ax2,
)
layered = np.stack([l.get_ydata() for l in ax1.lines])
filled = np.stack([l.get_ydata() for l in ax2.lines])
assert_array_almost_equal(
(layered / layered.sum(axis=0)).cumsum(axis=0),
filled,
)
@pytest.mark.parametrize("multiple", ["stack", "fill"])
def test_fill_default(self, long_df, multiple):
ax = kdeplot(
data=long_df, x="x", hue="a", multiple=multiple, fill=None
)
assert len(ax.collections) > 0
@pytest.mark.parametrize("multiple", ["layer", "stack", "fill"])
def test_fill_nondefault(self, long_df, multiple):
f, (ax1, ax2) = plt.subplots(ncols=2)
kws = dict(data=long_df, x="x", hue="a")
kdeplot(**kws, multiple=multiple, fill=False, ax=ax1)
kdeplot(**kws, multiple=multiple, fill=True, ax=ax2)
assert len(ax1.collections) == 0
assert len(ax2.collections) > 0
def test_color_cycle_interaction(self, flat_series):
color = (.2, 1, .6)
f, ax = plt.subplots()
kdeplot(flat_series)
kdeplot(flat_series)
assert_colors_equal(ax.lines[0].get_color(), "C0")
assert_colors_equal(ax.lines[1].get_color(), "C1")
plt.close(f)
f, ax = plt.subplots()
kdeplot(flat_series, color=color)
kdeplot(flat_series)
assert_colors_equal(ax.lines[0].get_color(), color)
assert_colors_equal(ax.lines[1].get_color(), "C0")
plt.close(f)
f, ax = plt.subplots()
kdeplot(flat_series, fill=True)
kdeplot(flat_series, fill=True)
assert_colors_equal(ax.collections[0].get_facecolor(), to_rgba("C0", .25))
assert_colors_equal(ax.collections[1].get_facecolor(), to_rgba("C1", .25))
plt.close(f)
@pytest.mark.parametrize("fill", [True, False])
def test_artist_color(self, long_df, fill):
color = (.2, 1, .6)
alpha = .5
f, ax = plt.subplots()
kdeplot(long_df["x"], fill=fill, color=color)
if fill:
artist_color = ax.collections[-1].get_facecolor().squeeze()
else:
artist_color = ax.lines[-1].get_color()
default_alpha = .25 if fill else 1
assert_colors_equal(artist_color, to_rgba(color, default_alpha))
kdeplot(long_df["x"], fill=fill, color=color, alpha=alpha)
if fill:
artist_color = ax.collections[-1].get_facecolor().squeeze()
else:
artist_color = ax.lines[-1].get_color()
assert_colors_equal(artist_color, to_rgba(color, alpha))
def test_datetime_scale(self, long_df):
f, (ax1, ax2) = plt.subplots(2)
kdeplot(x=long_df["t"], fill=True, ax=ax1)
kdeplot(x=long_df["t"], fill=False, ax=ax2)
assert ax1.get_xlim() == ax2.get_xlim()
def test_multiple_argument_check(self, long_df):
with pytest.raises(ValueError, match="`multiple` must be"):
kdeplot(data=long_df, x="x", hue="a", multiple="bad_input")
def test_cut(self, rng):
x = rng.normal(0, 3, 1000)
f, ax = plt.subplots()
kdeplot(x=x, cut=0, legend=False)
xdata_0 = ax.lines[0].get_xdata()
assert xdata_0.min() == x.min()
assert xdata_0.max() == x.max()
kdeplot(x=x, cut=2, legend=False)
xdata_2 = ax.lines[1].get_xdata()
assert xdata_2.min() < xdata_0.min()
assert xdata_2.max() > xdata_0.max()
assert len(xdata_0) == len(xdata_2)
def test_clip(self, rng):
x = rng.normal(0, 3, 1000)
clip = -1, 1
ax = kdeplot(x=x, clip=clip)
xdata = ax.lines[0].get_xdata()
assert xdata.min() >= clip[0]
assert xdata.max() <= clip[1]
def test_line_is_density(self, long_df):
ax = kdeplot(data=long_df, x="x", cut=5)
x, y = ax.lines[0].get_xydata().T
assert integrate(y, x) == pytest.approx(1)
@pytest.mark.skipif(_no_scipy, reason="Test requires scipy")
def test_cumulative(self, long_df):
ax = kdeplot(data=long_df, x="x", cut=5, cumulative=True)
y = ax.lines[0].get_ydata()
assert y[0] == pytest.approx(0)
assert y[-1] == pytest.approx(1)
@pytest.mark.skipif(not _no_scipy, reason="Test requires scipy's absence")
def test_cumulative_requires_scipy(self, long_df):
with pytest.raises(RuntimeError):
kdeplot(data=long_df, x="x", cut=5, cumulative=True)
def test_common_norm(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(
data=long_df, x="x", hue="c", common_norm=True, cut=10, ax=ax1
)
kdeplot(
data=long_df, x="x", hue="c", common_norm=False, cut=10, ax=ax2
)
total_area = 0
for line in ax1.lines:
xdata, ydata = line.get_xydata().T
total_area += integrate(ydata, xdata)
assert total_area == pytest.approx(1)
for line in ax2.lines:
xdata, ydata = line.get_xydata().T
assert integrate(ydata, xdata) == pytest.approx(1)
def test_common_grid(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
order = "a", "b", "c"
kdeplot(
data=long_df, x="x", hue="a", hue_order=order,
common_grid=False, cut=0, ax=ax1,
)
kdeplot(
data=long_df, x="x", hue="a", hue_order=order,
common_grid=True, cut=0, ax=ax2,
)
for line, level in zip(ax1.lines[::-1], order):
xdata = line.get_xdata()
assert xdata.min() == long_df.loc[long_df["a"] == level, "x"].min()
assert xdata.max() == long_df.loc[long_df["a"] == level, "x"].max()
for line in ax2.lines:
xdata = line.get_xdata().T
assert xdata.min() == long_df["x"].min()
assert xdata.max() == long_df["x"].max()
def test_bw_method(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_method=0.2, legend=False)
kdeplot(data=long_df, x="x", bw_method=1.0, legend=False)
kdeplot(data=long_df, x="x", bw_method=3.0, legend=False)
l1, l2, l3 = ax.lines
assert (
np.abs(np.diff(l1.get_ydata())).mean()
> np.abs(np.diff(l2.get_ydata())).mean()
)
assert (
np.abs(np.diff(l2.get_ydata())).mean()
> np.abs(np.diff(l3.get_ydata())).mean()
)
def test_bw_adjust(self, long_df):
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", bw_adjust=0.2, legend=False)
kdeplot(data=long_df, x="x", bw_adjust=1.0, legend=False)
kdeplot(data=long_df, x="x", bw_adjust=3.0, legend=False)
l1, l2, l3 = ax.lines
assert (
np.abs(np.diff(l1.get_ydata())).mean()
> np.abs(np.diff(l2.get_ydata())).mean()
)
assert (
np.abs(np.diff(l2.get_ydata())).mean()
> np.abs(np.diff(l3.get_ydata())).mean()
)
def test_log_scale_implicit(self, rng):
x = rng.lognormal(0, 1, 100)
f, (ax1, ax2) = plt.subplots(ncols=2)
ax1.set_xscale("log")
kdeplot(x=x, ax=ax1)
kdeplot(x=x, ax=ax1)
xdata_log = ax1.lines[0].get_xdata()
assert (xdata_log > 0).all()
assert (np.diff(xdata_log, 2) > 0).all()
assert np.allclose(np.diff(np.log(xdata_log), 2), 0)
f, ax = plt.subplots()
ax.set_yscale("log")
kdeplot(y=x, ax=ax)
assert_array_equal(ax.lines[0].get_xdata(), ax1.lines[0].get_ydata())
def test_log_scale_explicit(self, rng):
x = rng.lognormal(0, 1, 100)
f, (ax1, ax2, ax3) = plt.subplots(ncols=3)
ax1.set_xscale("log")
kdeplot(x=x, ax=ax1)
kdeplot(x=x, log_scale=True, ax=ax2)
kdeplot(x=x, log_scale=10, ax=ax3)
for ax in f.axes:
assert ax.get_xscale() == "log"
supports = [ax.lines[0].get_xdata() for ax in f.axes]
for a, b in itertools.product(supports, supports):
assert_array_equal(a, b)
densities = [ax.lines[0].get_ydata() for ax in f.axes]
for a, b in itertools.product(densities, densities):
assert_array_equal(a, b)
f, ax = plt.subplots()
kdeplot(y=x, log_scale=True, ax=ax)
assert ax.get_yscale() == "log"
def test_log_scale_with_hue(self, rng):
data = rng.lognormal(0, 1, 50), rng.lognormal(0, 2, 100)
ax = kdeplot(data=data, log_scale=True, common_grid=True)
assert_array_equal(ax.lines[0].get_xdata(), ax.lines[1].get_xdata())
def test_log_scale_normalization(self, rng):
x = rng.lognormal(0, 1, 100)
ax = kdeplot(x=x, log_scale=True, cut=10)
xdata, ydata = ax.lines[0].get_xydata().T
integral = integrate(ydata, np.log10(xdata))
assert integral == pytest.approx(1)
def test_weights(self):
x = [1, 2]
weights = [2, 1]
ax = kdeplot(x=x, weights=weights, bw_method=.1)
xdata, ydata = ax.lines[0].get_xydata().T
y1 = ydata[np.abs(xdata - 1).argmin()]
y2 = ydata[np.abs(xdata - 2).argmin()]
assert y1 == pytest.approx(2 * y2)
def test_weight_norm(self, rng):
vals = rng.normal(0, 1, 50)
x = np.concatenate([vals, vals])
w = np.repeat([1, 2], 50)
ax = kdeplot(x=x, weights=w, hue=w, common_norm=True)
# Recall that artists are added in reverse of hue order
x1, y1 = ax.lines[0].get_xydata().T
x2, y2 = ax.lines[1].get_xydata().T
assert integrate(y1, x1) == pytest.approx(2 * integrate(y2, x2))
def test_sticky_edges(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=long_df, x="x", fill=True, ax=ax1)
assert ax1.collections[0].sticky_edges.y[:] == [0, np.inf]
kdeplot(
data=long_df, x="x", hue="a", multiple="fill", fill=True, ax=ax2
)
assert ax2.collections[0].sticky_edges.y[:] == [0, 1]
def test_line_kws(self, flat_array):
lw = 3
color = (.2, .5, .8)
ax = kdeplot(x=flat_array, linewidth=lw, color=color)
line, = ax.lines
assert line.get_linewidth() == lw
assert_colors_equal(line.get_color(), color)
def test_input_checking(self, long_df):
err = "The x variable is categorical,"
with pytest.raises(TypeError, match=err):
kdeplot(data=long_df, x="a")
def test_axis_labels(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(data=long_df, x="x", ax=ax1)
assert ax1.get_xlabel() == "x"
assert ax1.get_ylabel() == "Density"
kdeplot(data=long_df, y="y", ax=ax2)
assert ax2.get_xlabel() == "Density"
assert ax2.get_ylabel() == "y"
def test_legend(self, long_df):
ax = kdeplot(data=long_df, x="x", hue="a")
assert ax.legend_.get_title().get_text() == "a"
legend_labels = ax.legend_.get_texts()
order = categorical_order(long_df["a"])
for label, level in zip(legend_labels, order):
assert label.get_text() == level
legend_artists = ax.legend_.findobj(mpl.lines.Line2D)
if _version_predates(mpl, "3.5.0b0"):
# https://github.com/matplotlib/matplotlib/pull/20699
legend_artists = legend_artists[::2]
palette = color_palette()
for artist, color in zip(legend_artists, palette):
assert_colors_equal(artist.get_color(), color)
ax.clear()
kdeplot(data=long_df, x="x", hue="a", legend=False)
assert ax.legend_ is None
def test_replaced_kws(self, long_df):
with pytest.raises(TypeError, match=r"`data2` has been removed"):
kdeplot(data=long_df, x="x", data2="y")
| TestKDEPlotUnivariate |
python | django-guardian__django-guardian | example_project/posts/admin.py | {
"start": 133,
"end": 374
} | class ____(GuardedModelAdmin):
prepopulated_fields = {"slug": ("title",)}
list_display = ("title", "slug", "created_at")
search_fields = ("title", "content")
ordering = ("-created_at",)
date_hierarchy = "created_at"
| PostAdmin |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_context_cache.py | {
"start": 5489,
"end": 9330
} | class ____:
"""Test the integration of SecretCache with variable access."""
@staticmethod
@conf_vars({("secrets", "use_cache"): "true"})
def setup_method():
SecretCache.reset()
SecretCache.init()
@staticmethod
def teardown_method():
SecretCache.reset()
@patch("airflow.sdk.execution_time.supervisor.ensure_secrets_backend_loaded")
def test_get_variable_uses_cache_when_available(self, mock_ensure_backends):
"""Test that _get_variable uses cache when variable is cached."""
key = "test_key"
value = "test_value"
SecretCache.save_variable(key, value)
result = _get_variable(key, deserialize_json=False)
assert result == value
mock_ensure_backends.assert_not_called()
@patch("airflow.sdk.execution_time.supervisor.ensure_secrets_backend_loaded")
def test_get_variable_from_backend_saves_to_cache(self, mock_ensure_backends):
"""Test that variable from secrets backend is saved to cache."""
key = "test_key"
value = "test_value"
mock_backend = MagicMock(spec=["get_variable"])
mock_backend.get_variable.return_value = value
mock_ensure_backends.return_value = [mock_backend]
result = _get_variable(key, deserialize_json=False)
assert result == value
mock_backend.get_variable.assert_called_once_with(key=key)
cached_value = SecretCache.get_variable(key)
assert cached_value == value
@patch("airflow.sdk.execution_time.supervisor.ensure_secrets_backend_loaded")
def test_get_variable_from_api_saves_to_cache(self, mock_ensure_backends, mock_supervisor_comms):
"""Test that variable from API server is saved to cache."""
key = "test_key"
value = "test_value"
var_result = VariableResult(key=key, value=value)
mock_ensure_backends.return_value = [ExecutionAPISecretsBackend()]
mock_supervisor_comms.send.return_value = var_result
result = _get_variable(key, deserialize_json=False)
assert result == value
cached_value = SecretCache.get_variable(key)
assert cached_value == value
@patch("airflow.sdk.execution_time.supervisor.ensure_secrets_backend_loaded")
def test_get_variable_with_json_deserialization(self, mock_ensure_backends):
"""Test that _get_variable handles JSON deserialization correctly with cache."""
key = "test_key"
json_value = '{"key": "value", "number": 42}'
SecretCache.save_variable(key, json_value)
result = _get_variable(key, deserialize_json=True)
assert result == {"key": "value", "number": 42}
cached_value = SecretCache.get_variable(key)
assert cached_value == json_value
def test_set_variable_invalidates_cache(self, mock_supervisor_comms):
"""Test that _set_variable invalidates the cache."""
key = "test_key"
old_value = "old_value"
new_value = "new_value"
SecretCache.save_variable(key, old_value)
_set_variable(key, new_value)
mock_supervisor_comms.send.assert_called_once()
with pytest.raises(SecretCache.NotPresentException):
SecretCache.get_variable(key)
def test_delete_variable_invalidates_cache(self, mock_supervisor_comms):
"""Test that _delete_variable invalidates the cache."""
key = "test_key"
value = "test_value"
SecretCache.save_variable(key, value)
from airflow.sdk.execution_time.comms import OKResponse
mock_supervisor_comms.send.return_value = OKResponse(ok=True)
_delete_variable(key)
mock_supervisor_comms.send.assert_called_once()
with pytest.raises(SecretCache.NotPresentException):
SecretCache.get_variable(key)
| TestVariableCacheIntegration |
python | falconry__falcon | falcon/_typing.py | {
"start": 2538,
"end": 2936
} | class ____(Protocol[_AReqT, _ARespT]):
async def __call__(
self,
req: _AReqT,
resp: _ARespT | None,
error: Exception,
params: dict[str, Any],
*,
ws: WebSocket | None = ...,
) -> None: ...
# Error serializers
ErrorSerializer = Callable[[_ReqT, _RespT, 'HTTPError'], None]
# Sinks
SinkPrefix = Union[str, Pattern[str]]
| AsgiErrorHandler |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/markdown/base.py | {
"start": 315,
"end": 5226
} | class ____(BaseReader):
"""
Markdown parser.
Extract text from markdown files.
Returns dictionary with keys as headers and values as the text between headers.
"""
def __init__(
self,
*args: Any,
remove_hyperlinks: bool = True,
remove_images: bool = True,
separator: str = " ",
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._remove_hyperlinks = remove_hyperlinks
self._remove_images = remove_images
self._separator = separator
def markdown_to_tups(self, markdown_text: str) -> List[Tuple[Optional[str], str]]:
"""Convert a markdown file to a list of tuples containing header and text."""
markdown_tups: List[Tuple[Optional[str], str]] = []
lines = markdown_text.split("\n")
current_lines = []
in_code_block = False
headers = {}
for line in lines:
# Toggle code block state
if line.startswith("```"):
in_code_block = not in_code_block
if in_code_block:
current_lines.append(line)
continue
# Process headers only when not in a code block
else:
line = line.strip()
if not line:
continue
header_match = re.match(r"^(#+)\s+(.*)", line)
if header_match:
if current_lines and not headers:
# Add content before first header
markdown_tups.append((None, "\n".join(current_lines)))
current_lines.clear()
# Extract header level and text
header_level = len(
header_match.group(1)
) # number of '#' indicates level
current_header = header_match.group(2) # the header text
if headers.get(header_level):
# Add previous section to the list before switching header
markdown_tups.append(
(
self._separator.join(headers.values()),
"\n".join(current_lines),
)
)
# remove all headers with level greater than current header
headers = {k: v for k, v in headers.items() if k < header_level}
current_lines.clear()
headers[header_level] = current_header
else:
current_lines.append(line)
# Append the last section
if current_lines or headers:
markdown_tups.append(
(self._separator.join(headers.values()), "\n".join(current_lines))
)
# Postprocess the tuples before returning
return [
(
key.strip() if key else None, # Clean up header (strip whitespace)
re.sub(r"<.*?>", "", value), # Remove HTML tags
)
for key, value in markdown_tups
]
def remove_images(self, content: str) -> str:
"""Remove images in markdown content but keep the description."""
pattern = r""
return re.sub(pattern, r"\1", content)
def remove_hyperlinks(self, content: str) -> str:
"""Remove hyperlinks in markdown content."""
pattern = r"\[(.*?)\]\((.*?)\)"
return re.sub(pattern, r"\1", content)
def _init_parser(self) -> Dict:
"""Initialize the parser with the config."""
return {}
def parse_tups(
self,
filepath: str,
errors: str = "ignore",
fs: Optional[AbstractFileSystem] = None,
) -> List[Tuple[Optional[str], str]]:
"""Parse file into tuples."""
fs = fs or LocalFileSystem()
with fs.open(filepath, encoding="utf-8") as f:
content = f.read().decode(encoding="utf-8")
if self._remove_hyperlinks:
content = self.remove_hyperlinks(content)
if self._remove_images:
content = self.remove_images(content)
return self.markdown_to_tups(content)
def load_data(
self,
file: str,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
tups = self.parse_tups(file, fs=fs)
results = []
for header, text in tups:
if header is None:
results.append(Document(text=text, metadata=extra_info or {}))
else:
results.append(
Document(text=f"\n\n{header}\n{text}", metadata=extra_info or {})
)
return results
| MarkdownReader |
python | pyparsing__pyparsing | tests/test_unit.py | {
"start": 2734,
"end": 4512
} | class ____(unittest.TestCase):
@contextlib.contextmanager
def assertRaises(self, expected_exception_type: Any, msg: Any = None):
"""
Simple wrapper to print out the exceptions raised after assertRaises
"""
with super().assertRaises(expected_exception_type, msg=msg) as ar:
yield
if getattr(ar, "exception", None) is not None:
print(
f"Raised expected exception: {type(ar.exception).__name__}: {ar.exception}"
)
else:
print(f"Expected {expected_exception_type.__name__} exception not raised")
return ar
@contextlib.contextmanager
def assertWarns(self, expected_warning_type: Any, msg: Any = None):
"""
Simple wrapper to print out the warnings raised after assertWarns
"""
with super().assertWarns(expected_warning_type, msg=msg) as ar:
yield
if getattr(ar, "warning", None) is not None:
print(f"Raised expected warning: {type(ar.warning).__name__}: {ar.warning}")
else:
print(f"Expected {expected_warning_type.__name__} warning not raised")
return ar
@contextlib.contextmanager
def assertDoesNotWarn(self, warning_type: type = UserWarning, msg: str = None):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
yield
except Exception as e:
if msg is None:
msg = f"unexpected warning {e} raised"
if isinstance(e, warning_type):
self.fail(f"{msg}: {e}")
else:
raise
finally:
warnings.simplefilter("default")
| TestCase |
python | getsentry__sentry | src/sentry/notifications/notification_action/metric_alert_registry/handlers/slack_metric_alert_handler.py | {
"start": 989,
"end": 2890
} | class ____(BaseMetricAlertHandler):
@classmethod
def send_alert(
cls,
notification_context: NotificationContext,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
trigger_status: TriggerStatus,
notification_uuid: str,
organization: Organization,
project: Project,
) -> None:
detector = Detector.objects.get(id=alert_context.action_identifier_id)
if not detector:
raise ValueError("Detector not found")
open_period = GroupOpenPeriod.objects.get(id=open_period_context.id)
if not open_period:
raise ValueError("Open period not found")
alert_rule_serialized_response = get_alert_rule_serializer(detector)
detector_serialized_response = get_detector_serializer(detector)
incident_serialized_response = get_detailed_incident_serializer(open_period)
logger.info(
"notification_action.execute_via_metric_alert_handler.slack",
extra={
"action_id": alert_context.action_identifier_id,
"serialized_incident": incident_serialized_response,
"serialized_alert_rule": alert_rule_serialized_response,
},
)
send_incident_alert_notification(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
organization=organization,
notification_uuid=notification_uuid,
alert_rule_serialized_response=alert_rule_serialized_response,
incident_serialized_response=incident_serialized_response,
detector_serialized_response=detector_serialized_response,
)
| SlackMetricAlertHandler |
python | google__pytype | pytype/overlays/typed_dict.py | {
"start": 13715,
"end": 14346
} | class ____(abstract.PyTDFunction):
"""Implementation of typing.is_typeddict."""
def call(self, node, func, args, alias_map=None):
self.match_args(node, args)
if args.posargs:
tp = args.posargs[0]
elif "tp" in args.namedargs:
tp = args.namedargs["tp"]
else:
return node, self.ctx.convert.bool_values[None].to_variable(node)
is_typeddict = [_is_typeddict(v) for v in tp.data]
if all(is_typeddict):
boolval = True
elif not any(is_typeddict):
boolval = False
else:
boolval = None
return node, self.ctx.convert.bool_values[boolval].to_variable(node)
| IsTypedDict |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 88081,
"end": 93848
} | class ____:
def test_chunksizes(self):
ds1 = xr.Dataset({"a": ("x", np.arange(10))})
ds2 = xr.Dataset({"b": ("y", np.arange(5))})
ds3 = xr.Dataset({"c": ("z", np.arange(4))})
ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))})
groups = {
"/": ds1.chunk({"x": 5}),
"/group1": ds2.chunk({"y": 3}),
"/group2": ds3.chunk({"z": 2}),
"/group1/subgroup1": ds4.chunk({"x": 5}),
}
tree = xr.DataTree.from_dict(groups)
expected_chunksizes = {path: node.chunksizes for path, node in groups.items()}
assert tree.chunksizes == expected_chunksizes
def test_load(self):
ds1 = xr.Dataset({"a": ("x", np.arange(10))})
ds2 = xr.Dataset({"b": ("y", np.arange(5))})
ds3 = xr.Dataset({"c": ("z", np.arange(4))})
ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))})
groups = {"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4}
expected = xr.DataTree.from_dict(groups)
tree = xr.DataTree.from_dict(
{
"/": ds1.chunk({"x": 5}),
"/group1": ds2.chunk({"y": 3}),
"/group2": ds3.chunk({"z": 2}),
"/group1/subgroup1": ds4.chunk({"x": 5}),
}
)
expected_chunksizes: Mapping[str, Mapping]
expected_chunksizes = {node.path: {} for node in tree.subtree}
actual = tree.load()
assert_identical(actual, expected)
assert tree.chunksizes == expected_chunksizes
assert actual.chunksizes == expected_chunksizes
tree = xr.DataTree.from_dict(groups)
actual = tree.load()
assert_identical(actual, expected)
assert actual.chunksizes == expected_chunksizes
def test_compute(self):
ds1 = xr.Dataset({"a": ("x", np.arange(10))})
ds2 = xr.Dataset({"b": ("y", np.arange(5))})
ds3 = xr.Dataset({"c": ("z", np.arange(4))})
ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))})
expected = xr.DataTree.from_dict(
{"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4}
)
tree = xr.DataTree.from_dict(
{
"/": ds1.chunk({"x": 5}),
"/group1": ds2.chunk({"y": 3}),
"/group2": ds3.chunk({"z": 2}),
"/group1/subgroup1": ds4.chunk({"x": 5}),
}
)
original_chunksizes = tree.chunksizes
expected_chunksizes: Mapping[str, Mapping]
expected_chunksizes = {node.path: {} for node in tree.subtree}
actual = tree.compute()
assert_identical(actual, expected)
assert actual.chunksizes == expected_chunksizes, "mismatching chunksizes"
assert tree.chunksizes == original_chunksizes, "original tree was modified"
def test_persist(self):
ds1 = xr.Dataset({"a": ("x", np.arange(10))})
ds2 = xr.Dataset({"b": ("y", np.arange(5))})
ds3 = xr.Dataset({"c": ("z", np.arange(4))})
ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))})
def fn(x):
return 2 * x
expected = xr.DataTree.from_dict(
{
"/": fn(ds1).chunk({"x": 5}),
"/group1": fn(ds2).chunk({"y": 3}),
"/group2": fn(ds3).chunk({"z": 2}),
"/group1/subgroup1": fn(ds4).chunk({"x": 5}),
}
)
# Add trivial second layer to the task graph, persist should reduce to one
tree = xr.DataTree.from_dict(
{
"/": fn(ds1.chunk({"x": 5})),
"/group1": fn(ds2.chunk({"y": 3})),
"/group2": fn(ds3.chunk({"z": 2})),
"/group1/subgroup1": fn(ds4.chunk({"x": 5})),
}
)
original_chunksizes = tree.chunksizes
original_hlg_depths = {
node.path: len(node.dataset.__dask_graph__().layers)
for node in tree.subtree
}
actual = tree.persist()
actual_hlg_depths = {
node.path: len(node.dataset.__dask_graph__().layers)
for node in actual.subtree
}
assert_identical(actual, expected)
assert actual.chunksizes == original_chunksizes, "chunksizes were modified"
assert tree.chunksizes == original_chunksizes, (
"original chunksizes were modified"
)
assert all(d == 1 for d in actual_hlg_depths.values()), (
"unexpected dask graph depth"
)
assert all(d == 2 for d in original_hlg_depths.values()), (
"original dask graph was modified"
)
def test_chunk(self):
ds1 = xr.Dataset({"a": ("x", np.arange(10))})
ds2 = xr.Dataset({"b": ("y", np.arange(5))})
ds3 = xr.Dataset({"c": ("z", np.arange(4))})
ds4 = xr.Dataset({"d": ("x", np.arange(-5, 5))})
expected = xr.DataTree.from_dict(
{
"/": ds1.chunk({"x": 5}),
"/group1": ds2.chunk({"y": 3}),
"/group2": ds3.chunk({"z": 2}),
"/group1/subgroup1": ds4.chunk({"x": 5}),
}
)
tree = xr.DataTree.from_dict(
{"/": ds1, "/group1": ds2, "/group2": ds3, "/group1/subgroup1": ds4}
)
actual = tree.chunk({"x": 5, "y": 3, "z": 2})
assert_identical(actual, expected)
assert actual.chunksizes == expected.chunksizes
with pytest.raises(TypeError, match="invalid type"):
tree.chunk(None)
with pytest.raises(TypeError, match="invalid type"):
tree.chunk((1, 2))
with pytest.raises(ValueError, match="not found in data dimensions"):
tree.chunk({"u": 2})
| TestDask |
python | pypa__warehouse | warehouse/accounts/interfaces.py | {
"start": 6837,
"end": 7225
} | class ____(Interface):
def dumps(data):
"""
Generates a unique token based on the data provided
"""
def loads(token):
"""
Gets the data corresponding to the token provided
"""
def unsafe_load_payload(token):
"""
Gets the data corresponding to the token provided *regardless of validity*
"""
| ITokenService |
python | huggingface__transformers | src/transformers/models/sam_hq/modeling_sam_hq.py | {
"start": 22320,
"end": 24617
} | class ____(SamHQPreTrainedModel):
_can_record_outputs = {
"hidden_states": SamHQVisionLayer,
"attentions": SamHQVisionAttention,
}
def __init__(self, config: SamHQVisionConfig):
super().__init__(config)
self.config = config
self.image_size = config.image_size
self.patch_embed = SamHQPatchEmbeddings(config)
self.pos_embed = None
if config.use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(
1,
config.image_size // config.patch_size,
config.image_size // config.patch_size,
config.hidden_size,
)
)
self.layers = nn.ModuleList()
for i in range(config.num_hidden_layers):
layer = SamHQVisionLayer(
config,
window_size=config.window_size if i not in config.global_attn_indexes else 0,
)
self.layers.append(layer)
self.neck = SamHQVisionNeck(config)
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.patch_embed
@check_model_inputs(tie_last_hidden_states=False)
def forward(
self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs]
) -> Union[tuple, SamHQVisionEncoderOutput]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.patch_embed(pixel_values)
if self.pos_embed is not None:
hidden_states = hidden_states + self.pos_embed
intermediate_embeddings = []
for layer_module in self.layers:
hidden_states = layer_module(hidden_states)
# Collect embeddings from non-windowed blocks
if hasattr(layer_module, "window_size") and layer_module.window_size == 0:
intermediate_embeddings.append(hidden_states)
hidden_states = self.neck(hidden_states)
return SamHQVisionEncoderOutput(
last_hidden_state=hidden_states,
intermediate_embeddings=intermediate_embeddings,
)
| SamHQVisionEncoder |
python | gevent__gevent | src/greentest/3.14/test_httpservers.py | {
"start": 12556,
"end": 15093
} | class ____(BaseTestCase):
CERTFILE = certdata_file("keycert.pem")
ONLYCERT = certdata_file("ssl_cert.pem")
ONLYKEY = certdata_file("ssl_key.pem")
CERTFILE_PROTECTED = certdata_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = certdata_file("ssl_key.passwd.pem")
EMPTYCERT = certdata_file("nullcert.pem")
BADCERT = certdata_file("badcert.pem")
KEY_PASSWORD = "somepass"
BADPASSWORD = "badpass"
tls = (ONLYCERT, ONLYKEY, None) # values by default
request_handler = DummyRequestHandler
def test_get(self):
response = self.request('/')
self.assertEqual(response.status, HTTPStatus.OK)
def request(self, uri, method='GET', body=None, headers={}):
context = ssl._create_unverified_context()
self.connection = http.client.HTTPSConnection(
self.HOST, self.PORT, context=context
)
self.connection.request(method, uri, body, headers)
return self.connection.getresponse()
def test_valid_certdata(self):
valid_certdata= [
(self.CERTFILE, None, None),
(self.CERTFILE, self.CERTFILE, None),
(self.CERTFILE_PROTECTED, None, self.KEY_PASSWORD),
(self.ONLYCERT, self.ONLYKEY_PROTECTED, self.KEY_PASSWORD),
]
for certfile, keyfile, password in valid_certdata:
with self.subTest(
certfile=certfile, keyfile=keyfile, password=password
):
server = create_https_server(certfile, keyfile, password)
self.assertIsInstance(server, HTTPSServer)
server.server_close()
def test_invalid_certdata(self):
invalid_certdata = [
(self.BADCERT, None, None),
(self.EMPTYCERT, None, None),
(self.ONLYCERT, None, None),
(self.ONLYKEY, None, None),
(self.ONLYKEY, self.ONLYCERT, None),
(self.CERTFILE_PROTECTED, None, self.BADPASSWORD),
# TODO: test the next case and add same case to test_ssl (We
# specify a cert and a password-protected file, but no password):
# (self.CERTFILE_PROTECTED, None, None),
# see issue #132102
]
for certfile, keyfile, password in invalid_certdata:
with self.subTest(
certfile=certfile, keyfile=keyfile, password=password
):
with self.assertRaises(ssl.SSLError):
create_https_server(certfile, keyfile, password)
| BaseHTTPSServerTestCase |
python | doocs__leetcode | lcof2/剑指 Offer II 085. 生成匹配的括号/Solution.py | {
"start": 0,
"end": 404
} | class ____:
def generateParenthesis(self, n: int) -> List[str]:
def dfs(left, right, t):
if left == n and right == n:
ans.append(t)
return
if left < n:
dfs(left + 1, right, t + '(')
if right < left:
dfs(left, right + 1, t + ')')
ans = []
dfs(0, 0, '')
return ans
| Solution |
python | Netflix__metaflow | metaflow/exception.py | {
"start": 3291,
"end": 3596
} | class ____(MetaflowException):
headline = "Unknown user"
def __init__(self):
msg = (
"Metaflow could not determine your user name based on "
"environment variables ($USERNAME etc.)"
)
super(MetaflowUnknownUser, self).__init__(msg)
| MetaflowUnknownUser |
python | ansible__ansible | lib/ansible/module_utils/facts/virtual/netbsd.py | {
"start": 862,
"end": 2791
} | class ____(Virtual, VirtualSysctlDetectionMixin):
platform = 'NetBSD'
def get_virtual_facts(self):
virtual_facts = {}
host_tech = set()
guest_tech = set()
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
virtual_product_facts = self.detect_virt_product('machdep.dmi.system-product')
guest_tech.update(virtual_product_facts['virtualization_tech_guest'])
host_tech.update(virtual_product_facts['virtualization_tech_host'])
virtual_facts.update(virtual_product_facts)
virtual_vendor_facts = self.detect_virt_vendor('machdep.dmi.system-vendor')
guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
virtual_facts.update(virtual_vendor_facts)
# The above logic is tried first for backwards compatibility. If
# something above matches, use it. Otherwise if the result is still
# empty, try machdep.hypervisor.
virtual_vendor_facts = self.detect_virt_vendor('machdep.hypervisor')
guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
virtual_facts.update(virtual_vendor_facts)
if os.path.exists('/dev/xencons'):
guest_tech.add('xen')
if virtual_facts['virtualization_type'] == '':
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
virtual_facts['virtualization_tech_guest'] = guest_tech
virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
| NetBSDVirtual |
python | nedbat__coveragepy | coverage/types.py | {
"start": 5011,
"end": 5168
} | class ____(Protocol):
"""A callable warn() function."""
def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ...
| TWarnFn |
python | doocs__leetcode | solution/2000-2099/2076.Process Restricted Friend Requests/Solution.py | {
"start": 0,
"end": 804
} | class ____:
def friendRequests(
self, n: int, restrictions: List[List[int]], requests: List[List[int]]
) -> List[bool]:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
p = list(range(n))
ans = []
for u, v in requests:
pu, pv = find(u), find(v)
if pu == pv:
ans.append(True)
else:
ok = True
for x, y in restrictions:
px, py = find(x), find(y)
if (pu == px and pv == py) or (pu == py and pv == px):
ok = False
break
ans.append(ok)
if ok:
p[pu] = pv
return ans
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/test_options.py | {
"start": 37167,
"end": 41430
} | class ____(_Polymorphic):
def test_missing_attr_wpoly_subclasss(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
assert_raises_message(
sa.exc.ArgumentError,
r"Mapped class Mapper\[Manager\(managers\)\] does not apply to "
"any of "
r"the root entities in this query, e.g. "
r"with_polymorphic\(Person, \[Manager\]\).",
s.query(wp).options(load_only(Manager.status))._compile_state,
)
def test_missing_attr_of_type_subclass_one(self):
s = fixture_session()
e1 = with_polymorphic(Person, [Engineer])
assert_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Manager.manager_name" does '
r"not link from "
r'relationship "Company.employees.'
r'of_type\(with_polymorphic\(Person, \[Engineer\]\)\)".$',
lambda: s.query(Company)
.options(
joinedload(Company.employees.of_type(e1)).load_only(
Manager.manager_name
)
)
._compile_state(),
)
def test_missing_attr_of_type_subclass_two(self):
s = fixture_session()
assert_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Manager.manager_name" does '
r"not link from "
r'relationship "Company.employees.'
r'of_type\(Mapper\[Engineer\(engineers\)\]\)".$',
lambda: s.query(Company)
.options(
joinedload(Company.employees.of_type(Engineer)).load_only(
Manager.manager_name
)
)
._compile_state(),
)
def test_missing_attr_of_type_subclass_name_matches(self):
s = fixture_session()
# the name "status" is present on Engineer also, make sure
# that doesn't get mixed up here
assert_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Manager.status" does '
r"not link from "
r'relationship "Company.employees.'
r'of_type\(Mapper\[Engineer\(engineers\)\]\)".$',
lambda: s.query(Company)
.options(
joinedload(Company.employees.of_type(Engineer)).load_only(
Manager.status
)
)
._compile_state(),
)
def test_missing_attr_of_type_wpoly_subclass(self):
s = fixture_session()
wp = with_polymorphic(Person, [Manager], flat=True)
assert_raises_message(
sa.exc.ArgumentError,
r'ORM mapped entity or attribute "Manager.manager_name" does '
r"not link from "
r'relationship "Company.employees.'
r'of_type\(with_polymorphic\(Person, \[Manager\]\)\)".$',
lambda: s.query(Company)
.options(
joinedload(Company.employees.of_type(wp)).load_only(
Manager.manager_name
)
)
._compile_state(),
)
@testing.variation("use_options", [True, False])
def test_missing_attr_is_missing_of_type_for_subtype(self, use_options):
s = fixture_session()
with expect_raises_message(
sa.exc.ArgumentError,
r"ORM mapped entity or attribute "
r'(?:"Mapper\[Engineer\(engineers\)\]"|"Engineer.engineer_name") '
r'does not link from relationship "Company.employees". Did you '
r'mean to use "Company.employees.of_type\(Engineer\)" '
r'or "loadopt.options'
r'\(selectin_polymorphic\(Person, \[Engineer\]\), ...\)" \?',
):
if use_options:
s.query(Company).options(
joinedload(Company.employees).options(
defer(Engineer.engineer_name)
)
)._compile_state()
else:
s.query(Company).options(
joinedload(Company.employees).defer(Engineer.engineer_name)
)._compile_state()
| OptionsNoPropTestInh |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 49541,
"end": 50729
} | class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoPromptEncoderConfig):
super().__init__()
self.scale = config.scale
positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
self.register_buffer("positional_embedding", positional_embedding)
def forward(self, input_coords, input_shape=None):
"""Positionally encode points that are normalized to [0,1]."""
coordinates = input_coords.clone()
if input_shape is not None:
coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
coordinates.to(torch.float32)
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coordinates = 2 * coordinates - 1
coordinates = coordinates.to(self.positional_embedding.dtype)
coordinates = coordinates @ self.positional_embedding
coordinates = 2 * np.pi * coordinates
# outputs d_1 x ... x d_n x channel shape
return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1)
| Sam3TrackerVideoPositionalEmbedding |
python | mlflow__mlflow | mlflow/tracing/export/uc_table.py | {
"start": 481,
"end": 2670
} | class ____(MlflowV3SpanExporter):
"""
An exporter implementation that logs the traces to Databricks Unity Catalog table.
"""
def __init__(self, tracking_uri: str | None = None) -> None:
super().__init__(tracking_uri)
# Track if we've raised an error for span export to avoid raising it multiple times.
self._has_raised_span_export_error = False
def _export_spans_incrementally(self, spans: Sequence[ReadableSpan]) -> None:
"""
Export spans incrementally as they complete.
Args:
spans: Sequence of ReadableSpan objects to export.
"""
location = get_active_spans_table_name()
if not location:
# this should not happen since this exporter is only used when a destination
# is set to UCSchemaLocation
_logger.debug("No active spans table name found. Skipping span export.")
return
_logger.debug(f"exporting spans to uc table: {location}")
# Wrapping with MLflow span interface for easier downstream handling
spans = [Span(span) for span in spans]
if self._should_log_async():
self._async_queue.put(
task=Task(
handler=self._client.log_spans,
args=(location, spans),
error_msg="Failed to log spans to the trace server.",
)
)
else:
try:
self._client.log_spans(location, spans)
except Exception as e:
if self._has_raised_span_export_error:
_logger.debug(f"Failed to log spans to the trace server: {e}", exc_info=True)
else:
_logger.warning(f"Failed to log spans to the trace server: {e}")
self._has_raised_span_export_error = True
def _should_enable_async_logging(self) -> bool:
return MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.get()
# Override this to False since spans are logged to UC table instead of artifacts.
def _should_log_spans_to_artifacts(self, trace_info: TraceInfo) -> bool:
return False
| DatabricksUCTableSpanExporter |
python | pypa__pip | src/pip/_vendor/packaging/specifiers.py | {
"start": 1186,
"end": 2871
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier-like
objects are equal.
:param other: The other object to check against.
"""
@property
@abc.abstractmethod
def prereleases(self) -> bool | None:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
prereleases or it can be set to ``None`` (the default) to use default semantics.
"""
@prereleases.setter
def prereleases(self, value: bool) -> None:
"""Setter for :attr:`prereleases`.
:param value: The value to set.
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: bool | None = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
| BaseSpecifier |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/gaussian_nb.py | {
"start": 326,
"end": 2034
} | class ____(AutoSklearnClassificationAlgorithm):
def __init__(self, random_state=None, verbose=0):
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y):
import sklearn.naive_bayes
self.estimator = sklearn.naive_bayes.GaussianNB()
self.classes_ = np.unique(y.astype(int))
# Fallback for multilabel classification
if len(y.shape) > 1 and y.shape[1] > 1:
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(
self.estimator, n_jobs=1
)
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "GaussianNB",
"name": "Gaussian Naive Bayes classifier",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
return cs
| GaussianNB |
python | pypa__pipenv | pipenv/resolver.py | {
"start": 3217,
"end": 3752
} | class ____:
"""Core package requirement information."""
name: str
version: Optional[str] = None
extras: Set[str] = field(default_factory=set)
markers: Optional[str] = None
hashes: Set[str] = field(default_factory=set)
source: PackageSource = field(default_factory=PackageSource)
def __post_init__(self):
if isinstance(self.extras, list):
self.extras = set(self.extras)
if isinstance(self.hashes, list):
self.hashes = set(self.hashes)
@dataclass
| PackageRequirement |
python | kamyu104__LeetCode-Solutions | Python/number-of-steps-to-reduce-a-number-to-zero.py | {
"start": 32,
"end": 290
} | class ____(object):
def numberOfSteps (self, num):
"""
:type num: int
:rtype: int
"""
result = 0
while num:
result += 2 if num%2 else 1
num //= 2
return max(result-1, 0)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py | {
"start": 1325,
"end": 8600
} | class ____(test_util.TensorFlowTestCase):
def testDocStringExample(self):
rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5, 6]])
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.indices,
[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]])
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6])
self.assertAllEqual(st.dense_shape, [4, 3])
def test2DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [],
['g']])
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
st.indices, [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0], [4, 0]])
self.assertAllEqual(st.values, b'a b c d e f g'.split())
self.assertAllEqual(st.dense_shape, [5, 3])
def test3DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]], [[11, 12]], [], [[13, 14]]
],
ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.indices,
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
[1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 2, 0], [1, 2, 1],
[2, 0, 0], [2, 0, 1], [4, 0, 0], [4, 0, 1]])
self.assertAllEqual(st.values,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [5, 3, 2])
def test4DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [], [[[9, 10], [11, 12]]]],
ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
self.assertAllEqual(
st.indices,
[
[0, 0, 0, 0], # index for value=1
[0, 0, 0, 1], # index for value=2
[0, 0, 1, 0], # index for value=3
[0, 0, 1, 1], # index for value=4
[0, 1, 0, 0], # index for value=5
[0, 1, 0, 1], # index for value=6
[0, 1, 1, 0], # index for value=7
[0, 1, 1, 1], # index for value=8
[2, 0, 0, 0], # index for value=9
[2, 0, 0, 1], # index for value=10
[2, 0, 1, 0], # index for value=11
[2, 0, 1, 1], # index for value=12
])
self.assertAllEqual(st.dense_shape, [3, 2, 2, 2])
def test4DRaggedTensorWithTwoRaggedDimensions(self):
rt = ragged_factory_ops.constant(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]]],
[[[11, 12]], [], [[13, 14]]], []],
ragged_rank=2)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
st.indices,
[
[0, 0, 0, 0], # index for value=1
[0, 0, 0, 1], # index for value=2
[0, 0, 1, 0], # index for value=3
[0, 0, 1, 1], # index for value=4
[0, 1, 0, 0], # index for value=5
[0, 1, 0, 1], # index for value=6
[0, 1, 1, 0], # index for value=7
[0, 1, 1, 1], # index for value=8
[0, 1, 2, 0], # index for value=9
[0, 1, 2, 1], # index for value=10
[1, 0, 0, 0], # index for value=11
[1, 0, 0, 1], # index for value=12
[1, 2, 0, 0], # index for value=13
[1, 2, 0, 1], # index for value=14
])
self.assertAllEqual(st.values,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [3, 3, 3, 2])
def testShape(self):
rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 2])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [2])
rt = ragged_factory_ops.constant([[[1, 2]], [], [[3, 4]], []],
ragged_rank=1)
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [4, 3])
self.assertEqual(st.values.shape.as_list(), [4])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
rt = ragged_factory_ops.constant([[[1], [2, 3, 4, 5, 6, 7]], [[]]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 3])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
def testKernelErrors(self):
# An empty vector, defined using a placeholder to ensure that we can't
# determine that it's invalid at graph-construction time.
empty_vector = array_ops.placeholder_with_default(
array_ops.zeros([0], dtypes.int64), shape=None)
bad_rt1 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[2, 3], values=[1, 2, 3], validate=False)
bad_split0 = r'First value of ragged splits must be 0.*'
with self.assertRaisesRegex(errors.InvalidArgumentError, bad_split0):
self.evaluate(bad_rt1.to_sparse())
bad_rt2 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5], values=empty_vector, validate=False)
bad_rt3 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 1],
values=ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5], values=empty_vector, validate=False),
validate=False)
split_mismatch1_error = r'Final value of ragged splits must match.*'
for rt in [bad_rt2, bad_rt3]:
with self.assertRaisesRegex(errors.InvalidArgumentError,
split_mismatch1_error):
self.evaluate(rt.to_sparse())
bad_rt4 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5],
values=ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0], values=empty_vector, validate=False),
validate=False)
split_mismatch2_error = r'Final value of ragged splits must match.*'
with self.assertRaisesRegex(errors.InvalidArgumentError,
split_mismatch2_error):
self.evaluate(bad_rt4.to_sparse())
bad_rt5 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=empty_vector, values=[], validate=False)
empty_splits_error = (r'ragged splits may not be empty.*')
with self.assertRaisesRegex(errors.InvalidArgumentError,
empty_splits_error):
self.evaluate(bad_rt5.to_sparse())
def testGradient(self):
if context.executing_eagerly():
return
# rt1.shape == rt2.shape == [2, (D2), (D3), 2].
rt1 = ragged_factory_ops.constant(
[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0]]]], ragged_rank=2)
rt2 = ragged_factory_ops.constant(
[[[[9.0, 8.0], [7.0, 6.0]], [[5.0, 4.0]]]], ragged_rank=2)
rt = ragged_functional_ops.map_flat_values(math_ops.add, rt1, rt2 * 2.0)
st = rt.to_sparse()
g1, g2 = gradients_impl.gradients(st.values,
[rt1.flat_values, rt2.flat_values])
self.assertAllEqual(g1, [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
self.assertAllEqual(g2, [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]])
if __name__ == '__main__':
googletest.main()
| RaggedTensorToSparseOpTest |
python | numba__numba | numba/tests/test_parallel_backend.py | {
"start": 3405,
"end": 4658
} | class ____(runnable):
def __call__(self):
sig = ['(f4, f4, f4[:])']
cfunc = guvectorize(sig, '(),()->()', **self._options)(gufunc_foo)
a = b = np.random.random(10).astype(np.float32)
expected = ufunc_foo(a, b)
got = cfunc(a, b)
np.testing.assert_allclose(expected, got)
def chooser(fnlist, **kwargs):
q = kwargs.get('queue')
try:
faulthandler.enable()
for _ in range(int(len(fnlist) * 1.5)):
fn = random.choice(fnlist)
fn()
except Exception as e:
q.put(e)
def compile_factory(parallel_class, queue_impl):
def run_compile(fnlist):
q = queue_impl()
kws = {'queue': q}
ths = [parallel_class(target=chooser, args=(fnlist,), kwargs=kws)
for i in range(4)]
for th in ths:
th.start()
for th in ths:
th.join()
if not q.empty():
errors = []
while not q.empty():
errors.append(q.get(False))
_msg = "Error(s) occurred in delegated runner:\n%s"
raise RuntimeError(_msg % '\n'.join([repr(x) for x in errors]))
return run_compile
# workers
_thread_class = threading.Thread
| guvectorize_runner |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 383253,
"end": 383920
} | class ____(sgqlc.types.Interface):
"""Represents an announcement banner."""
__schema__ = github_schema
__field_names__ = ("announcement", "announcement_expires_at", "announcement_user_dismissible")
announcement = sgqlc.types.Field(String, graphql_name="announcement")
"""The text of the announcement"""
announcement_expires_at = sgqlc.types.Field(DateTime, graphql_name="announcementExpiresAt")
"""The expiration date of the announcement, if any"""
announcement_user_dismissible = sgqlc.types.Field(Boolean, graphql_name="announcementUserDismissible")
"""Whether the announcement can be dismissed by the user"""
| AnnouncementBanner |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_management_commands.py | {
"start": 778,
"end": 3751
} | class ____(WhooshTestCase):
fixtures = ["bulk_data"]
def setUp(self):
super().setUp()
self.old_ui = connections["whoosh"].get_unified_index()
self.ui = UnifiedIndex()
self.wmmi = WhooshMockSearchIndex()
self.ui.build(indexes=[self.wmmi])
self.sb = connections["whoosh"].get_backend()
connections["whoosh"]._index = self.ui
self.sb.setup()
self.raw_whoosh = self.sb.index
self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
self.sb.delete_index()
self.sample_objs = MockModel.objects.all()
def tearDown(self):
connections["whoosh"]._index = self.old_ui
super().tearDown()
def verify_indexed_document_count(self, expected):
with self.raw_whoosh.searcher() as searcher:
count = searcher.doc_count()
self.assertEqual(count, expected)
def verify_indexed_documents(self):
"""Confirm that the documents in the search index match the database"""
with self.raw_whoosh.searcher() as searcher:
count = searcher.doc_count()
self.assertEqual(count, 23)
indexed_doc_ids = set(i["id"] for i in searcher.documents())
expected_doc_ids = set(
"core.mockmodel.%d" % i
for i in MockModel.objects.values_list("pk", flat=True)
)
self.assertSetEqual(indexed_doc_ids, expected_doc_ids)
def test_basic_commands(self):
call_command("clear_index", interactive=False, verbosity=0)
self.verify_indexed_document_count(0)
call_command("update_index", verbosity=0)
self.verify_indexed_documents()
call_command("clear_index", interactive=False, verbosity=0)
self.verify_indexed_document_count(0)
call_command("rebuild_index", interactive=False, verbosity=0)
self.verify_indexed_documents()
def test_remove(self):
call_command("clear_index", interactive=False, verbosity=0)
self.verify_indexed_document_count(0)
call_command("update_index", verbosity=0)
self.verify_indexed_documents()
# Remove several instances.
MockModel.objects.get(pk=1).delete()
MockModel.objects.get(pk=2).delete()
MockModel.objects.get(pk=8).delete()
self.verify_indexed_document_count(23)
# Plain ``update_index`` doesn't fix it.
call_command("update_index", verbosity=0)
self.verify_indexed_document_count(23)
# … but remove does:
call_command("update_index", remove=True, verbosity=0)
self.verify_indexed_document_count(20)
def test_multiprocessing(self):
call_command("clear_index", interactive=False, verbosity=0)
self.verify_indexed_document_count(0)
call_command("update_index", verbosity=2, workers=2, batchsize=5)
self.verify_indexed_documents()
| ManagementCommandTestCase |
python | dask__distributed | distributed/http/scheduler/api.py | {
"start": 302,
"end": 1104
} | class ____(RequestHandler):
async def post(self):
self.set_header("Content-Type", "application/json")
scheduler = self.server
try:
params = json.loads(self.request.body)
n_workers = params.get("n", 0)
if n_workers:
workers = scheduler.workers_to_close(n=n_workers)
workers_info = await scheduler.retire_workers(workers=workers)
else:
workers = params.get("workers", {})
workers_info = await scheduler.retire_workers(workers=workers)
self.write(json.dumps(recursive_to_dict(workers_info)))
except Exception as e:
self.set_status(500, str(e))
self.write(json.dumps({"Error": "Internal Server Error"}))
| RetireWorkersHandler |
python | falconry__falcon | falcon/testing/client.py | {
"start": 94280,
"end": 98705
} | class ____:
def __init__(
self, ws: helpers.ASGIWebSocketSimulator, task_req: asyncio.Task
) -> None:
self._ws = ws
self._task_req = task_req
async def __aenter__(self) -> helpers.ASGIWebSocketSimulator:
ready_waiter = asyncio.create_task(self._ws.wait_ready())
# NOTE(kgriffs): Wait on both so that in the case that the request
# task raises an error, we don't just end up masking it with an
# asyncio.TimeoutError.
await asyncio.wait(
[ready_waiter, self._task_req],
return_when=asyncio.FIRST_COMPLETED,
)
if ready_waiter.done():
await ready_waiter
else:
# NOTE(kgriffs): Retrieve the exception, if any
await self._task_req
# NOTE(kgriffs): This should complete gracefully (without a
# timeout). It may raise WebSocketDisconnected, but that
# is expected and desired for "normal" reasons that the
# request task finished without accepting the connection.
await ready_waiter
return self._ws
async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
await self._ws.close()
await self._task_req
def _prepare_sim_args(
path: str,
query_string: str | None,
params: Mapping[str, Any] | None,
params_csv: bool,
content_type: str | None,
headers: HeaderArg | None,
body: str | bytes | None,
json: Any | None,
extras: Mapping[str, Any] | None,
) -> tuple[str, str, HeaderArg | None, str | bytes | None, Mapping[str, Any]]:
if not path.startswith('/'):
raise ValueError("path must start with '/'")
if '?' in path:
if query_string or params:
raise ValueError(
'path may not contain a query string in combination with '
'the query_string or params parameters. Please use only one '
'way of specifying the query string.'
)
path, query_string = path.split('?', 1)
elif query_string and query_string.startswith('?'):
raise ValueError("query_string should not start with '?'")
extras = extras or {}
if query_string is None:
query_string = to_query_str(
params,
comma_delimited_lists=params_csv,
prefix=False,
)
if content_type is not None:
headers = dict(headers or {})
headers['Content-Type'] = content_type
if json is not None:
body = json_module.dumps(json, ensure_ascii=False)
headers = dict(headers or {})
headers['Content-Type'] = MEDIA_JSON
return path, query_string, headers, body, extras
def _is_asgi_app(app: Callable[..., Any]) -> bool:
app_args = inspect.getfullargspec(app).args
num_app_args = len(app_args)
# NOTE(kgriffs): Technically someone could name the "self" or "cls"
# arg something else, but we will make the simplifying
# assumption that this is rare enough to not worry about.
if app_args[0] in {'cls', 'self'}:
num_app_args -= 1
is_asgi = num_app_args == 3
return is_asgi
async def _wait_for_startup(events: Iterable[AsgiEvent]) -> None:
# NOTE(kgriffs): This is covered, but our gate for some reason doesn't
# understand `while True`.
while True: # pragma: nocover
for e in events:
if e['type'] == 'lifespan.startup.failed':
raise RuntimeError(
'ASGI app returned lifespan.startup.failed. ' + e['message']
)
if any(e['type'] == 'lifespan.startup.complete' for e in events):
break
# NOTE(kgriffs): Yield to the concurrent lifespan task
await asyncio.sleep(0)
async def _wait_for_shutdown(events: Iterable[AsgiEvent]) -> None:
# NOTE(kgriffs): This is covered, but our gate for some reason doesn't
# understand `while True`.
while True: # pragma: nocover
for e in events:
if e['type'] == 'lifespan.shutdown.failed':
raise RuntimeError(
'ASGI app returned lifespan.shutdown.failed. ' + e['message']
)
if any(e['type'] == 'lifespan.shutdown.complete' for e in events):
break
# NOTE(kgriffs): Yield to the concurrent lifespan task
await asyncio.sleep(0)
| _WSContextManager |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated_comment.py | {
"start": 403,
"end": 5226
} | class ____(pylint.checkers.BaseTokenChecker):
"""Checks for ``# deprecated:`` comments to ensure that the ``version`` has not passed or met the time for removal."""
name = 'deprecated-comment'
msgs = {
'E9601': (
"Deprecated core version (%r) found: %s",
"ansible-deprecated-version-comment",
None,
),
'E9602': (
"Deprecated comment contains invalid keys %r",
"ansible-deprecated-version-comment-invalid-key",
None,
),
'E9603': (
"Deprecated comment missing version",
"ansible-deprecated-version-comment-missing-version",
None,
),
'E9604': (
"Deprecated python version (%r) found: %s",
"ansible-deprecated-python-version-comment",
None,
),
'E9605': (
"Deprecated comment contains invalid version %r: %s",
"ansible-deprecated-version-comment-invalid-version",
None,
),
}
ANSIBLE_VERSION = LooseVersion('.'.join(ansible.release.__version__.split('.')[:3]))
"""The current ansible-core X.Y.Z version."""
def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None:
for token in tokens:
if token.type == tokenize.COMMENT:
self._process_comment(token)
def _deprecated_string_to_dict(self, token: tokenize.TokenInfo, string: str) -> dict[str, str]:
valid_keys = {'description', 'core_version', 'python_version'}
data = dict.fromkeys(valid_keys)
for opt in shlex.split(string):
if '=' not in opt:
data[opt] = None
continue
key, _sep, value = opt.partition('=')
data[key] = value
if not any((data['core_version'], data['python_version'])):
self.add_message(
'ansible-deprecated-version-comment-missing-version',
line=token.start[0],
col_offset=token.start[1],
)
bad = set(data).difference(valid_keys)
if bad:
self.add_message(
'ansible-deprecated-version-comment-invalid-key',
line=token.start[0],
col_offset=token.start[1],
args=(','.join(bad),),
)
return data
def _process_python_version(self, token: tokenize.TokenInfo, data: dict[str, str]) -> None:
check_version = '.'.join(map(str, self.linter.config.py_version)) # minimum supported Python version provided by ansible-test
try:
if LooseVersion(check_version) > LooseVersion(data['python_version']):
self.add_message(
'ansible-deprecated-python-version-comment',
line=token.start[0],
col_offset=token.start[1],
args=(
data['python_version'],
data['description'] or 'description not provided',
),
)
except (ValueError, TypeError) as exc:
self.add_message(
'ansible-deprecated-version-comment-invalid-version',
line=token.start[0],
col_offset=token.start[1],
args=(data['python_version'], exc),
)
def _process_core_version(self, token: tokenize.TokenInfo, data: dict[str, str]) -> None:
try:
if self.ANSIBLE_VERSION >= LooseVersion(data['core_version']):
self.add_message(
'ansible-deprecated-version-comment',
line=token.start[0],
col_offset=token.start[1],
args=(
data['core_version'],
data['description'] or 'description not provided',
),
)
except (ValueError, TypeError) as exc:
self.add_message(
'ansible-deprecated-version-comment-invalid-version',
line=token.start[0],
col_offset=token.start[1],
args=(data['core_version'], exc),
)
def _process_comment(self, token: tokenize.TokenInfo) -> None:
if token.string.startswith('# deprecated:'):
data = self._deprecated_string_to_dict(token, token.string[13:].strip())
if data['core_version']:
self._process_core_version(token, data)
if data['python_version']:
self._process_python_version(token, data)
def register(linter: pylint.lint.PyLinter) -> None:
"""Required method to auto-register this checker."""
linter.register_checker(AnsibleDeprecatedCommentChecker(linter))
| AnsibleDeprecatedCommentChecker |
python | numba__llvmlite | llvmlite/binding/newpassmanagers.py | {
"start": 19785,
"end": 34496
} | class ____(ffi.ObjectRef):
def __init__(self, tm, pto):
super().__init__(ffi.lib.LLVMPY_CreatePassBuilder(tm, pto))
self._pto = pto
self._tm = tm
self._time_passes_handler = None
def getModulePassManager(self):
return ModulePassManager(
ffi.lib.LLVMPY_buildPerModuleDefaultPipeline(
self, self._pto.speed_level, self._pto.size_level)
)
def getFunctionPassManager(self):
return FunctionPassManager(
ffi.lib.LLVMPY_buildFunctionSimplificationPipeline(
self, self._pto.speed_level, self._pto.size_level)
)
def start_pass_timing(self):
"""Enable the pass timers.
Raises
------
RuntimeError
If pass timing is already enabled.
"""
if self._time_passes_handler:
raise RuntimeError("Pass timing can only be done once")
self._time_passes_handler = TimePassesHandler()
ffi.lib.LLVMPY_EnableTimePasses(self, self._time_passes_handler)
def finish_pass_timing(self):
"""Returns the pass timings report and disables the LLVM internal
timers. Pass timers are enabled by ``start_pass_timing()``. If the
timers are not enabled, this function will return an empty string.
Returns
-------
res : str
LLVM generated timing report.
"""
if not self._time_passes_handler:
raise RuntimeError("Pass timing is not enabled")
with ffi.OutputString() as buf:
ffi.lib.LLVMPY_ReportAndDisableTimePasses(
self._time_passes_handler, buf)
return str(buf)
def _dispose(self):
ffi.lib.LLVMPY_DisposePassBuilder(self)
# ============================================================================
# FFI
ffi.lib.LLVMPY_DumpRefPruneStats.argtypes = [POINTER(_c_PruneStats), c_bool]
ffi.lib.LLVMPY_SetTimePasses.argtypes = [c_bool]
ffi.lib.LLVMPY_ReportAndResetTimings.argtypes = [POINTER(c_char_p)]
# ModulePassManager
ffi.lib.LLVMPY_CreateNewModulePassManager.restype = ffi.LLVMModulePassManagerRef
ffi.lib.LLVMPY_RunNewModulePassManager.argtypes = [
ffi.LLVMModulePassManagerRef, ffi.LLVMModuleRef,
ffi.LLVMPassBuilderRef,]
ffi.lib.LLVMPY_module_AddVerifierPass.argtypes = [ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddAAEvaluator.argtypes = [ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddSimplifyCFGPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopUnrollPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopRotatePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddInstCombinePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_AddJumpThreadingPass_module.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddCFGPrinterPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddCFGOnlyPrinterPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDomPrinter.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDomOnlyPrinter.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddPostDomPrinter.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddPostDomOnlyPrinter.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDomViewer.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDomOnlyViewer.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddPostDomViewer.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddPostDomOnlyViewer.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLintPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddADCEPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddBreakCriticalEdgesPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDSEPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddDCEPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddAggressiveInstCombinePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLCSSAPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddNewGVNPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopSimplifyPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopUnrollAndJamPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddSCCPPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLowerAtomicPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLowerInvokePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLowerSwitchPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddMemCpyOptPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddUnifyFunctionExitNodesPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddReassociatePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddRegToMemPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddSROAPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddSinkingPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddTailCallElimPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddInstructionNamerPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopDeletionPass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddLoopStrengthReducePass.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_module_AddConstantMergePass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddDeadArgumentEliminationPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddCallGraphDOTPrinterPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddModuleDebugInfoPrinterPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddAlwaysInlinerPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddReversePostOrderFunctionAttrsPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddGlobalDCEPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddGlobalOptPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddIPSCCPPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddInternalizePass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddLoopExtractorPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddMergeFunctionsPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddPartialInlinerPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddStripSymbolsPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddStripDeadDebugInfoPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddStripDeadPrototypesPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddStripDebugDeclarePass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddStripNonDebugSymbolsPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddArgumentPromotionPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_module_AddPostOrderFunctionAttrsPass.argtypes = [
ffi.LLVMModulePassManagerRef, ]
ffi.lib.LLVMPY_DisposeNewModulePassManger.argtypes = [
ffi.LLVMModulePassManagerRef,]
ffi.lib.LLVMPY_AddRefPrunePass_module.argtypes = [
ffi.LLVMModulePassManagerRef, c_int, c_size_t,
]
# FunctionPassManager
ffi.lib.LLVMPY_CreateNewFunctionPassManager.restype = \
ffi.LLVMFunctionPassManagerRef
ffi.lib.LLVMPY_RunNewFunctionPassManager.argtypes = [
ffi.LLVMFunctionPassManagerRef, ffi.LLVMValueRef,
ffi.LLVMPassBuilderRef,]
ffi.lib.LLVMPY_function_AddAAEvaluator.argtypes = [
ffi.LLVMFunctionPassManagerRef,]
ffi.lib.LLVMPY_function_AddSimplifyCFGPass.argtypes = [
ffi.LLVMFunctionPassManagerRef,]
ffi.lib.LLVMPY_function_AddLoopUnrollPass.argtypes = [
ffi.LLVMFunctionPassManagerRef,]
ffi.lib.LLVMPY_function_AddInstCombinePass.argtypes = [
ffi.LLVMFunctionPassManagerRef,]
ffi.lib.LLVMPY_AddJumpThreadingPass_function.argtypes = [
ffi.LLVMFunctionPassManagerRef, c_int,]
ffi.lib.LLVMPY_function_AddCFGPrinterPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddCFGOnlyPrinterPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDomPrinter.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDomOnlyPrinter.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddPostDomPrinter.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddPostDomOnlyPrinter.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDomViewer.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDomOnlyViewer.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddPostDomViewer.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddPostDomOnlyViewer.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLintPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddADCEPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddBreakCriticalEdgesPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDSEPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddDCEPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddAggressiveInstCombinePass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLCSSAPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddNewGVNPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLoopSimplifyPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLoopUnrollAndJamPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddSCCPPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLowerAtomicPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLowerInvokePass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLowerSwitchPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddMemCpyOptPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddUnifyFunctionExitNodesPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddReassociatePass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddRegToMemPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddSROAPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddSinkingPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddTailCallElimPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddInstructionNamerPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLoopRotatePass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLoopDeletionPass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_function_AddLoopStrengthReducePass.argtypes = [
ffi.LLVMFunctionPassManagerRef, ]
ffi.lib.LLVMPY_DisposeNewFunctionPassManger.argtypes = [
ffi.LLVMFunctionPassManagerRef,]
ffi.lib.LLVMPY_AddRefPrunePass_function.argtypes = [
ffi.LLVMFunctionPassManagerRef, c_int, c_size_t,
]
# PipelineTuningOptions
ffi.lib.LLVMPY_CreatePipelineTuningOptions.restype = \
ffi.LLVMPipelineTuningOptionsRef
ffi.lib.LLVMPY_PTOGetLoopInterleaving.restype = c_bool
ffi.lib.LLVMPY_PTOGetLoopInterleaving.argtypes = [
ffi.LLVMPipelineTuningOptionsRef,]
ffi.lib.LLVMPY_PTOSetLoopInterleaving.argtypes = [
ffi.LLVMPipelineTuningOptionsRef, c_bool]
ffi.lib.LLVMPY_PTOGetLoopVectorization.restype = c_bool
ffi.lib.LLVMPY_PTOGetLoopVectorization.argtypes = [
ffi.LLVMPipelineTuningOptionsRef,]
ffi.lib.LLVMPY_PTOSetLoopVectorization.argtypes = [
ffi.LLVMPipelineTuningOptionsRef, c_bool]
ffi.lib.LLVMPY_PTOGetSLPVectorization.restype = c_bool
ffi.lib.LLVMPY_PTOGetSLPVectorization.argtypes = [
ffi.LLVMPipelineTuningOptionsRef,]
ffi.lib.LLVMPY_PTOSetSLPVectorization.argtypes = [
ffi.LLVMPipelineTuningOptionsRef, c_bool]
ffi.lib.LLVMPY_PTOGetLoopUnrolling.restype = c_bool
ffi.lib.LLVMPY_PTOGetLoopUnrolling.argtypes = [
ffi.LLVMPipelineTuningOptionsRef,]
ffi.lib.LLVMPY_PTOSetLoopUnrolling.argtypes = [
ffi.LLVMPipelineTuningOptionsRef, c_bool]
ffi.lib.LLVMPY_PTOGetInlinerThreshold.restype = c_int
ffi.lib.LLVMPY_PTOSetInlinerThreshold.argtypes = [
ffi.LLVMPipelineTuningOptionsRef, c_int]
ffi.lib.LLVMPY_DisposePipelineTuningOptions.argtypes = \
[ffi.LLVMPipelineTuningOptionsRef,]
# PassBuilder
ffi.lib.LLVMPY_CreatePassBuilder.restype = ffi.LLVMPassBuilderRef
ffi.lib.LLVMPY_CreatePassBuilder.argtypes = [
ffi.LLVMTargetMachineRef,
ffi.LLVMPipelineTuningOptionsRef,
]
ffi.lib.LLVMPY_DisposePassBuilder.argtypes = [ffi.LLVMPassBuilderRef,]
ffi.lib.LLVMPY_CreateTimePassesHandler.restype = \
ffi.LLVMTimePassesHandlerRef
ffi.lib.LLVMPY_DisposeTimePassesHandler.argtypes = [
ffi.LLVMTimePassesHandlerRef,]
ffi.lib.LLVMPY_EnableTimePasses.argtypes = [
ffi.LLVMPassBuilderRef,
ffi.LLVMTimePassesHandlerRef,
]
ffi.lib.LLVMPY_ReportAndDisableTimePasses.argtypes = [
ffi.LLVMTimePassesHandlerRef,
POINTER(c_char_p),
]
# Pipeline builders
ffi.lib.LLVMPY_buildPerModuleDefaultPipeline.restype = \
ffi.LLVMModulePassManagerRef
ffi.lib.LLVMPY_buildPerModuleDefaultPipeline.argtypes = [
ffi.LLVMPassBuilderRef, c_int, c_int]
ffi.lib.LLVMPY_buildFunctionSimplificationPipeline.restype = \
ffi.LLVMFunctionPassManagerRef
ffi.lib.LLVMPY_buildFunctionSimplificationPipeline.argtypes = [
ffi.LLVMPassBuilderRef, c_int, c_int]
| PassBuilder |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 733442,
"end": 733789
} | class ____(sgqlc.types.Type, RepositoryNode):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("error", "pull_request")
error = sgqlc.types.Field(DependabotUpdateError, graphql_name="error")
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
| DependabotUpdate |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/button_multiline_label.py | {
"start": 80,
"end": 299
} | class ____(App):
def compose(self) -> ComposeResult:
yield Button("Button\nwith\nmulti-line\nlabel")
if __name__ == "__main__":
app = ButtonWithMultilineLabelApp()
app.run()
| ButtonWithMultilineLabelApp |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1080,
"end": 1173
} | class ____:
instance: Factory
reveal_type(ClassC.instance, expected_text="ClassC")
| ClassC |
python | keras-team__keras | keras/src/ops/image.py | {
"start": 55173,
"end": 58020
} | class ____(Operation):
def __init__(
self,
kernel_size=(3, 3),
sigma=(1.0, 1.0),
data_format=None,
*,
name=None,
):
super().__init__(name=name)
self.kernel_size = kernel_size
self.sigma = sigma
self.data_format = backend.standardize_data_format(data_format)
def call(self, images):
return backend.image.gaussian_blur(
images,
kernel_size=self.kernel_size,
sigma=self.sigma,
data_format=self.data_format,
)
def compute_output_spec(self, images):
if len(images.shape) not in (3, 4):
raise ValueError(
"Invalid images rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"images.shape={images.shape}"
)
return KerasTensor(images.shape, dtype=images.dtype)
@keras_export("keras.ops.image.gaussian_blur")
def gaussian_blur(
images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None
):
"""Applies a Gaussian blur to the image(s).
Args:
images: Input image or batch of images. Must be 3D or 4D.
kernel_size: A tuple of two integers, specifying the height and width
of the Gaussian kernel.
sigma: A tuple of two floats, specifying the standard deviation of
the Gaussian kernel along height and width.
data_format: A string specifying the data format of the input tensor.
It can be either `"channels_last"` or `"channels_first"`.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)`, while `"channels_first"`
corresponds to inputs with shape `(batch, channels, height, width)`.
If not specified, the value will default to
`keras.config.image_data_format`.
Returns:
Blurred image or batch of images.
Examples:
>>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images
>>> y = keras.ops.image.gaussian_blur(x)
>>> y.shape
(2, 64, 80, 3)
>>> x = np.random.random((64, 80, 3)) # single RGB image
>>> y = keras.ops.image.gaussian_blur(x)
>>> y.shape
(64, 80, 3)
>>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images
>>> y = keras.ops.image.gaussian_blur(
... x, data_format="channels_first")
>>> y.shape
(2, 3, 64, 80)
"""
if any_symbolic_tensors((images,)):
return GaussianBlur(
kernel_size=kernel_size,
sigma=sigma,
data_format=data_format,
).symbolic_call(images)
return backend.image.gaussian_blur(
images,
kernel_size=kernel_size,
sigma=sigma,
data_format=data_format,
)
| GaussianBlur |
python | scrapy__scrapy | tests/test_loader.py | {
"start": 19658,
"end": 19754
} | class ____(ItemLoader):
default_item_class = FunctionProcessorItem
| FunctionProcessorItemLoader |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.