language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_lexsort.py | {
"start": 32,
"end": 626
} | class ____:
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(
levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
)
assert index._is_lexsorted()
index = MultiIndex(
levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]]
)
assert not index._is_lexsorted()
index = MultiIndex(
levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]]
)
assert not index._is_lexsorted()
assert index._lexsort_depth == 0
| TestIsLexsorted |
python | sympy__sympy | sympy/categories/diagram_drawing.py | {
"start": 4388,
"end": 6316
} | class ____:
"""
Holds a growable grid of objects.
Explanation
===========
It is possible to append or prepend a row or a column to the grid
using the corresponding methods. Prepending rows or columns has
the effect of changing the coordinates of the already existing
elements.
This class currently represents a naive implementation of the
functionality with little attempt at optimisation.
"""
def __init__(self, width, height):
self._width = width
self._height = height
self._array = [[None for j in range(width)] for i in range(height)]
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def __getitem__(self, i_j):
"""
Returns the element located at in the i-th line and j-th
column.
"""
i, j = i_j
return self._array[i][j]
def __setitem__(self, i_j, newvalue):
"""
Sets the element located at in the i-th line and j-th
column.
"""
i, j = i_j
self._array[i][j] = newvalue
def append_row(self):
"""
Appends an empty row to the grid.
"""
self._height += 1
self._array.append([None for j in range(self._width)])
def append_column(self):
"""
Appends an empty column to the grid.
"""
self._width += 1
for i in range(self._height):
self._array[i].append(None)
def prepend_row(self):
"""
Prepends the grid with an empty row.
"""
self._height += 1
self._array.insert(0, [None for j in range(self._width)])
def prepend_column(self):
"""
Prepends the grid with an empty column.
"""
self._width += 1
for i in range(self._height):
self._array[i].insert(0, None)
| _GrowableGrid |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 3476,
"end": 3626
} | class ____(Status):
arguments: str
call_id: str
name: str
type: str = "function_call"
id: str | None = None
| ResponseFunctionToolCall |
python | huggingface__transformers | src/transformers/models/pegasus_x/modeling_pegasus_x.py | {
"start": 53077,
"end": 60483
} | class ____(PegasusXPreTrainedModel):
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: PegasusXConfig):
super().__init__(config)
vocab_size = config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
padding_idx = config.pad_token_id
self.shared = PegasusXScaledWordEmbedding(
vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale
)
self.encoder = PegasusXEncoder(config)
self.decoder = PegasusXDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if `new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Example:
```python
>>> from transformers import AutoTokenizer, PegasusModel
>>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-x-large")
>>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
>>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt")
>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 4, 1024]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The PEGASUS-X for conditional generation (e.g. summarization).
"""
)
| PegasusXModel |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 19300,
"end": 19883
} | class ____(Exception):
"""
Exception raised when performing an operation on non-numerical data.
For example, calling ``ohlc`` on a non-numerical column or a function
on a rolling window.
See Also
--------
Series.rolling : Provide rolling window calculations on Series object.
DataFrame.rolling : Provide rolling window calculations on DataFrame object.
Examples
--------
>>> ser = pd.Series(["a", "b", "c"])
>>> ser.rolling(2).sum()
Traceback (most recent call last):
DataError: No numeric types to aggregate
"""
| DataError |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 117390,
"end": 118617
} | class ____(BaseView):
"""Pretend our storage has a different type"""
target_dtype: torch.dtype
@classmethod
def create(cls, x: IRNode, new_dtype: torch.dtype) -> BaseView:
if is_storage_and_layout(x):
storage, old_layout = as_storage_and_layout(x)
new_layout = FixedLayout(
old_layout.device,
new_dtype,
old_layout.size,
old_layout.stride,
old_layout.offset,
old_layout.is_pinned,
)
return ReinterpretView(data=storage, layout=new_layout)
return DtypeView(data=x, target_dtype=new_dtype)
def __str__(self) -> str:
return self.str_helper([self.data, self.target_dtype])
__repr__ = __str__
@property
def dtype(self) -> torch.dtype:
return self.target_dtype
def get_size(self) -> Sequence[Expr]:
return self.data.get_size()
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
inner = self.data.make_loader()
def loader(idx: Sequence[Expr]) -> OpsValue:
return ops.to_dtype_bitcast(inner(idx), self.target_dtype, self.data.dtype)
return loader
| DtypeView |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py | {
"start": 2320,
"end": 2585
} | class ____(CParser, TestConstructor, Resolver):
"""Custom YAML loader that recognizes custom Ansible tags."""
def __init__(self, stream):
CParser.__init__(self, stream)
TestConstructor.__init__(self)
Resolver.__init__(self)
| TestLoader |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/methods/test_normalize.py | {
"start": 253,
"end": 3181
} | class ____:
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.skipif(
WASM, reason="tzset is available only on Unix-like systems, not WASM"
)
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
| TestNormalize |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 8739,
"end": 8826
} | class ____(VyperException):
"""Invalid operator for a given type."""
| InvalidOperation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self2.py | {
"start": 784,
"end": 993
} | class ____:
def set_scale(self, scale: float) -> Self:
self.scale = scale
return self
@classmethod
def from_config(cls, config: dict[str, float]) -> Self:
return cls()
| Shape1 |
python | joke2k__faker | faker/providers/person/fr_DZ/__init__.py | {
"start": 44,
"end": 9101
} | class ____(PersonProvider):
formats_female = ("{{last_name}} {{first_name_female}}",)
formats_male = ("{{last_name}} {{first_name_male}}",)
formats = formats_male + formats_female
# Source: https://studentsoftheworld.info/penpals/stats_fr.php?Pays=ALG
# Last checked: 2025-09-27
first_names_male = (
"Abdelatif",
"Abdelkader",
"Abderaouf",
"Abderrahmane",
"Adam",
"Adel",
"Ahmed",
"Akram",
"Aladin",
"Ali",
"Amine",
"Amir",
"Anis",
"Arezki",
"Aymen",
"Ayoub",
"Chabane",
"Cherif",
"Djamel",
"Fares",
"Farid",
"Farouk",
"Fatteh",
"Ferhat",
"Fodil",
"Ghilas",
"Hamid",
"Hamza",
"Hocine",
"Houcine",
"Ibrahim",
"Ilyes",
"Kada",
"Khaled",
"Khalil",
"Lamine",
"Lotfi",
"Malik",
"Massinissa",
"Mehdi",
"Mohamed",
"Mohand",
"Mohammed",
"Mouloud",
"Mounir",
"Mourad",
"Moussa",
"Mustapha",
"Nacer",
"Nadir",
"Nassim",
"Nazim",
"Omar",
"Oussama",
"Ouanes",
"Rabah",
"Rachid",
"Ramzi",
"Riad",
"Rida",
"Rochdi",
"Saad",
"Said",
"Salah",
"Salim",
"Sami",
"Samir",
"Samy",
"Sofiane",
"Soufiane",
"Taha",
"Walid",
"Wassim",
"Yacine",
"Yahia",
"Yanis",
"Yasser",
"Youba",
"Youcef",
"Younes",
"Zakaria",
)
# Source: https://studentsoftheworld.info/penpals/stats_fr.php?Pays=ALG
# Last checked: 2025-09-27
first_names_female = (
"Amani",
"Amel",
"Amina",
"Amira",
"Anaïs",
"Anissa",
"Asma",
"Aya",
"Bouchra",
"Célia",
"Céline",
"Chanez",
"Chiraz",
"Dalia",
"Dounia",
"Farah",
"Fatima",
"Fella",
"Feriel",
"Fouzia",
"Hadjer",
"Hana",
"Hania",
"Hayat",
"Houda",
"Ikram",
"Imene",
"Ines",
"Jasmine",
"Kahina",
"Katia",
"Khadidja",
"Leila",
"Lila",
"Lilia",
"Lina",
"Lisa",
"Lydia",
"Lyna",
"Lynda",
"Malak",
"Manel",
"Maria",
"Marwa",
"Maya",
"Mélissa",
"Meriem",
"Mina",
"Mira",
"Myriam",
"Nadia",
"Narimane",
"Nawal",
"Nedjma",
"Nesrine",
"Nihad",
"Nour",
"Racha",
"Rania",
"Rim",
"Rose",
"Rym",
"Ryma",
"Sabrina",
"Safia",
"Sahra",
"Salima",
"Salma",
"Samira",
"Sara",
"Sarah",
"Selma",
"Serine",
"Sofia",
"Sonia",
"Sophia",
"Souad",
"Soumia",
"Thinhinane",
"Wafae",
"Yasmine",
"Yasmina",
"Yousra",
"Zina",
"Zineb",
)
first_names = first_names_male + first_names_female
# Source: https://fr.geneawiki.com/wiki/Noms_de_famille_alg%C3%A9riens
# Last checked: 2025-09-27
last_names = (
"Abada",
"Abbad",
"Abbas",
"Abbassi",
"Abbes",
"Abdi",
"Abdelli",
"Abdellaoui",
"Abdelaziz",
"Abdou",
"Abed",
"Abid",
"Abou",
"Abouda",
"Aboukir",
"Achour",
"Achouri",
"Adda",
"Aissaoui",
"Aissani",
"Allal",
"Allali",
"Amara",
"Amari",
"Ameur",
"Ammari",
"Amrane",
"Amrani",
"Amrouche",
"Amri",
"Arab",
"Aribi",
"Attia",
"Ayad",
"Ayadi",
"Azzouz",
"Azizi",
"Bacha",
"Bahloul",
"Bahri",
"Bakhti",
"Bakhouche",
"Baghdadi",
"Belarbi",
"Belaid",
"Belaidi",
"Belbachir",
"Belhadj",
"Belkacem",
"Belkacemi",
"Belkadi",
"Belkheir",
"Belkhiri",
"Benabdellah",
"Benahmed",
"Benali",
"Benamar",
"Benamara",
"Benameur",
"Benaicha",
"Benaissa",
"Benaouda",
"Bencheikh",
"Bensalem",
"Bensaid",
"Bensalah",
"Benslimane",
"Benyahia",
"Benyamina",
"Benmoussa",
"Benyoucef",
"Benziane",
"Berkane",
"Berkani",
"Bettahar",
"Bey",
"Boubekeur",
"Bouabdellah",
"Bouafia",
"Boualem",
"Bouali",
"Bouaziz",
"Bouchama",
"Bouchareb",
"Boucetta",
"Boudiaf",
"Boudjemaa",
"Boudraa",
"Bouguerra",
"Boukhari",
"Boukhalfa",
"Boukhatem",
"Boumaza",
"Boumediene",
"Bounab",
"Boussaid",
"Boutaleb",
"Bouziane",
"Bouzid",
"Bouzidi",
"Brahimi",
"Brahmi",
"Chaib",
"Chabane",
"Charef",
"Chaoui",
"Chibani",
"Chikh",
"Chergui",
"Cherif",
"Cherifi",
"Cheriet",
"Cheikh",
"Chellaoua",
"Daoud",
"Daoudi",
"Dahmane",
"Dahmani",
"Derbal",
"Derradji",
"Derkaoui",
"Derouiche",
"Dib",
"Diaf",
"Dif",
"Djebbar",
"Djellal",
"Djellouli",
"Djoudi",
"Fares",
"Fellah",
"Ferhat",
"Filali",
"Gacem",
"Gasmi",
"Ghazi",
"Gharbi",
"Gherbi",
"Guessoum",
"Guendouz",
"Guerfi",
"Hadjadj",
"Hadji",
"Haddad",
"Haddouche",
"Hachemi",
"Hamel",
"Hamadouche",
"Hamadi",
"Hamdani",
"Hamdi",
"Hamidi",
"Hamlaoui",
"Hammadi",
"Hamoudi",
"Hamza",
"Hamzaoui",
"Hassani",
"Henni",
"Hocine",
"Houari",
"Ikhlef",
"Kaci",
"Kaddour",
"Kaddouri",
"Kadi",
"Kadri",
"Kamel",
"Kara",
"Kebaili",
"Kebir",
"Khaldi",
"Khaled",
"Khelif",
"Khelifa",
"Khelifi",
"Khelil",
"Korichi",
"Kouidri",
"Laib",
"Lakehal",
"Lakhal",
"Lakhdari",
"Lamri",
"Laouar",
"Larbi",
"Laribi",
"Latreche",
"Lahmar",
"Lamri",
"Laribi",
"Latreche",
"Lounis",
"Loucif",
"Madani",
"Madi",
"Mahmoudi",
"Mahi",
"Malki",
"Malek",
"Mansour",
"Mansouri",
"Maouche",
"Makhlouf",
"Makhloufi",
"Mazouz",
"Mazouzi",
"Mebarki",
"Mecheri",
"Meftah",
"Medjahed",
"Meddah",
"Meziane",
"Meziani",
"Mesbah",
"Messaoudi",
"Merabet",
"Merah",
"Merzougui",
"Merzoug",
"Mihoubi",
"Miloudi",
"Mimouni",
"Mokadem",
"Mokrane",
"Mokrani",
"Mokhtari",
"Mohammedi",
"Mostefaoui",
"Morsli",
"Moulay",
"Moussa",
"Moussaoui",
"Nacer",
"Naili",
"Nasri",
"Nedjar",
"Nouar",
"Noui",
"Nouri",
"Ouali",
"Ouchene",
"Otmani",
"Rabhi",
"Rachedi",
"Rabia",
"Rahmani",
"Rahal",
"Rahmouni",
"Rahmoune",
"Ramdani",
"Rais",
"Rezig",
"Sabri",
"Saci",
"Saad",
"Saadi",
"Saidi",
"Said",
"Saidani",
"Sahli",
"Sahraoui",
"Salem",
"Salhi",
"Salmi",
"Salah",
"Saoudi",
"Sayah",
"Seddiki",
"Selami",
"Senouci",
"Slimani",
"Smail",
"Soudani",
"Soltani",
"Taibi",
"Tabet",
"Tahraoui",
"Tahri",
"Talbi",
"Taleb",
"Touati",
"Touil",
"Toumi",
"Yahi",
"Yahia",
"Yahiaoui",
"Yousfi",
"Zaidi",
"Zaoui",
"Zeroual",
"Zerrouki",
"Ziane",
"Ziani",
"Zidane",
"Zitouni",
"Zouaoui",
)
| Provider |
python | dask__distributed | distributed/tests/test_failed_workers.py | {
"start": 9628,
"end": 19280
} | class ____:
def __init__(self, data, delay=0.1):
self.delay = delay
self.data = data
def __reduce__(self):
sleep(self.delay)
return SlowTransmitData, (self.data, self.delay)
def __sizeof__(self) -> int:
# Ensure this is offloaded to avoid blocking loop
return parse_bytes(dask.config.get("distributed.comm.offload")) + 1
@pytest.mark.slow
@gen_cluster(client=True, config={"distributed.scheduler.work-stealing": False})
async def test_worker_who_has_clears_after_failed_connection(c, s, a, b):
"""This test is very sensitive to cluster state consistency. Timeouts often
indicate subtle deadlocks. Be mindful when marking flaky/repeat/etc."""
async with Nanny(s.address, nthreads=2, worker_class=BlockedGetData) as n:
while len(s.workers) < 3:
await asyncio.sleep(0.01)
n_worker_address = n.worker_address
futures = c.map(
inc,
range(20),
key=["f%d" % i for i in range(20)],
workers=[n_worker_address],
allow_other_workers=True,
)
def sink(*args):
pass
await wait(futures)
result_fut = c.submit(sink, futures, workers=a.address)
await n.kill(timeout=1)
while len(s.workers) > 2:
await asyncio.sleep(0.01)
await result_fut
assert not a.state.has_what.get(n_worker_address)
assert not any(
n_worker_address in s for ts in a.state.tasks.values() for s in ts.who_has
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2), ("127.0.0.1", 3)],
config=NO_AMM,
)
async def test_worker_same_host_replicas_missing(c, s, a, b, x):
# See GH4784
def mock_address_host(addr):
# act as if A and X are on the same host
nonlocal a, b, x
if addr in [a.address, x.address]:
return "A"
else:
return "B"
with mock.patch("distributed.worker.get_address_host", mock_address_host):
futures = c.map(
slowinc,
range(20),
delay=0.1,
key=["f%d" % i for i in range(20)],
workers=[a.address],
allow_other_workers=True,
)
await wait(futures)
# replicate data to avoid the scheduler retriggering the computation
# retriggering cleans up the state nicely but doesn't reflect real world
# scenarios where there may be replicas on the cluster, e.g. they are
# replicated as a dependency somewhere else
await c.replicate(futures, n=2, workers=[a.address, b.address])
def sink(*args):
pass
# Since A and X are mocked to be co-located, X will consistently pick A
# to fetch data from. It will never succeed since we're removing data
# artificially, without notifying the scheduler.
# This can only succeed if B handles the missing data properly by
# removing A from the known sources of keys
a.handle_stimulus(
FreeKeysEvent(keys=["f1"], stimulus_id="Am I evil?")
) # Yes, I am!
result_fut = c.submit(sink, futures, workers=x.address)
await result_fut
@pytest.mark.slow
@gen_cluster(client=True, timeout=60, Worker=Nanny, nthreads=[("127.0.0.1", 1)])
async def test_restart_timeout_on_long_running_task(c, s, a):
with captured_logger("distributed.scheduler") as sio:
future = c.submit(sleep, 3600)
await asyncio.sleep(0.1)
await c.restart()
text = sio.getvalue()
assert "timeout" not in text.lower()
@pytest.mark.slow
@gen_cluster(client=True, config={"distributed.scheduler.worker-ttl": "500ms"})
async def test_worker_time_to_live(c, s, a, b):
# Note that this value is ignored because is less than 10x heartbeat_interval
assert s.worker_ttl == 0.5
assert set(s.workers) == {a.address, b.address}
assert all(
event["action"] != "worker-ttl-timed-out"
for _, event in s.get_events("scheduler")
)
a.periodic_callbacks["heartbeat"].stop()
start = time()
while set(s.workers) == {a.address, b.address}:
await asyncio.sleep(0.01)
assert set(s.workers) == {b.address}
events = [
event
for _, event in s.get_events("scheduler")
if event["action"] == "worker-ttl-timed-out"
]
assert len(events) == 1
# This event includes the actual TTL that we applied, i.e, 10 * heartbeat.
assert events[0] == {
"action": "worker-ttl-timed-out",
"workers": [a.address],
"ttl": 5.0,
}
# Worker removal is triggered after 10 * heartbeat
# This is 10 * 0.5s at the moment of writing.
# Currently observing an extra 0.3~0.6s on top of the interval.
# Adding some padding to prevent flakiness.
assert time() - start < 7
@pytest.mark.slow
@pytest.mark.parametrize("block_evloop", [False, True])
@gen_cluster(
client=True,
Worker=Nanny,
nthreads=[("", 1)],
scheduler_kwargs={"worker_ttl": "500ms", "allowed_failures": 0},
)
async def test_worker_ttl_restarts_worker(c, s, a, block_evloop):
"""If the event loop of a worker becomes completely unresponsive, the scheduler will
restart it through the nanny.
"""
ws = s.workers[a.worker_address]
async def f():
w = get_worker()
w.periodic_callbacks["heartbeat"].stop()
if block_evloop:
sleep(9999) # Block event loop indefinitely
else:
await asyncio.sleep(9999)
fut = c.submit(f, key="x")
while not s.workers or (
(new_ws := next(iter(s.workers.values()))) is ws
or new_ws.status != Status.running
):
await asyncio.sleep(0.01)
if block_evloop:
# The nanny killed the worker with SIGKILL.
# The restart has increased the suspicious count.
with pytest.raises(KilledWorker):
await fut
assert s.tasks["x"].state == "erred"
assert s.tasks["x"].suspicious == 1
else:
# The nanny sent to the WorkerProcess a {op: stop} through IPC, which in turn
# successfully invoked Worker.close(nanny=False).
# This behaviour makes sense as the worker-ttl timeout was most likely caused
# by a failure in networking, rather than a hung process.
assert s.tasks["x"].state == "processing"
assert s.tasks["x"].suspicious == 0
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
nthreads=[("", 2)],
scheduler_kwargs={"allowed_failures": 0},
)
async def test_restart_hung_worker(c, s, a):
"""Test restart_workers() to restart a worker whose event loop has become completely
unresponsive.
"""
ws = s.workers[a.worker_address]
async def f():
w = get_worker()
w.periodic_callbacks["heartbeat"].stop()
sleep(9999) # Block event loop indefinitely
fut = c.submit(f)
# Wait for worker to hang
with pytest.raises(asyncio.TimeoutError):
while True:
await wait(c.submit(inc, 1, pure=False), timeout=0.2)
await c.restart_workers([a.worker_address])
assert len(s.workers) == 1
assert next(iter(s.workers.values())) is not ws
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_forget_data_not_supposed_to_have(c, s, a):
"""If a dependency fetch finishes on a worker after the scheduler already released
everything, the worker might be stuck with a redundant replica which is never
cleaned up.
"""
async with BlockedGatherDep(s.address) as b:
x = c.submit(inc, 1, key="x", workers=[a.address])
y = c.submit(inc, x, key="y", workers=[b.address])
await b.in_gather_dep.wait()
assert b.state.tasks["x"].state == "flight"
x.release()
y.release()
while s.tasks:
await asyncio.sleep(0.01)
b.block_gather_dep.set()
while b.state.tasks:
await asyncio.sleep(0.01)
@gen_cluster(
client=True,
nthreads=[("", 1)] * 2,
config=merge(NO_AMM, {"distributed.comm.timeouts.connect": "1s"}),
)
async def test_failing_worker_with_additional_replicas_on_cluster(c, s, w0, w2):
"""
If a worker detects a missing dependency, the scheduler is notified. If no
other replica is available, the dependency is rescheduled. A reschedule
typically causes a lot of state to be reset. However, if another replica is
available, we'll need to ensure that the worker can detect outdated state
and correct its state.
"""
def dummy(*args, **kwargs):
return
async with BlockedGatherDep(s.address) as w1:
f1 = c.submit(
inc,
1,
key="f1",
workers=[w0.worker_address],
)
# We'll schedule tasks on two workers, s.t. f1 is replicated. We will
# suspend one of the workers and kill the origin worker of f1 such that a
# comm failure causes the worker to handle a missing dependency. It will ask
# the schedule such that it knows that a replica is available on f2 and
# reschedules the fetch
f2 = c.submit(dummy, f1, key="f2", workers=[w1.worker_address])
f3 = c.submit(dummy, f1, key="f3", workers=[w2.worker_address])
await w1.in_gather_dep.wait()
await wait(f3)
# Because of this line we need to disable AMM; otherwise it could choose to delete
# the replicas of f1 on w1 and w2 and keep the one on w0.
await w0.close()
w1.block_gather_dep.set()
await c.gather([f1, f2, f3])
| SlowTransmitData |
python | conda__conda | conda/gateways/logging.py | {
"start": 762,
"end": 2020
} | class ____(Filter):
TOKEN_URL_PATTERN = re.compile(
r"(|https?://)" # \1 scheme
r"(|\s" # \2 space, or
r"|(?:(?:\d{1,3}\.){3}\d{1,3})" # ipv4, or
r"|(?:" # domain name
r"(?:[a-zA-Z0-9-]{1,20}\.){0,10}" # non-tld
r"(?:[a-zA-Z]{2}[a-zA-Z0-9-]{0,18})" # tld
r"))" # end domain name
r"(|:\d{1,5})?" # \3 port
r"/t/[a-z0-9A-Z-]+/" # token
)
TOKEN_REPLACE = staticmethod(partial(TOKEN_URL_PATTERN.sub, r"\1\2\3/t/<TOKEN>/"))
def filter(self, record):
"""
Since Python 2's getMessage() is incapable of handling any
strings that are not unicode when it interpolates the message
with the arguments, we fix that here by doing it ourselves.
At the same time we replace tokens in the arguments which was
not happening until now.
"""
if not isinstance(record.msg, str):
# This should always be the case but it's not checked so
# we avoid any potential logging errors.
return True
if record.args:
record.msg = record.msg % record.args
record.args = None
record.msg = self.TOKEN_REPLACE(record.msg)
return True
| TokenURLFilter |
python | getsentry__sentry | tests/sentry/receivers/test_releases.py | {
"start": 1224,
"end": 1722
} | class ____(TestCase):
@patch("sentry.tasks.clear_expired_resolutions.clear_expired_resolutions.delay")
def test_simple(self, mock_delay: MagicMock) -> None:
with self.capture_on_commit_callbacks(execute=True):
release = Release.objects.create(
version="a", organization_id=self.project.organization_id
)
release.add_project(self.project)
mock_delay.assert_called_once_with(release_id=release.id)
| ResolveGroupResolutionsTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_highlight.py | {
"start": 54,
"end": 882
} | class ____(util.MdCase):
"""Test that highlighting works with guessing."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'guess_lang': True
}
}
def test_guess(self):
"""Test guessing."""
self.check_markdown(
r'''
```
import test
test.test()
```
''',
'''
<div class="highlight"><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
<span class="n">test</span><span class="o">.</span><span class="n">test</span><span class="p">()</span>
</code></pre></div>
''', # noqa: E501
True
)
| TestHighlightGuess |
python | keras-team__keras | keras/src/metrics/reduction_metrics_test.py | {
"start": 337,
"end": 1910
} | class ____(testing.TestCase):
def test_config(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
self.assertEqual(sum_obj.name, "sum")
self.assertEqual(len(sum_obj.variables), 1)
self.assertEqual(sum_obj._dtype, "float32")
# Check save and restore config
sum_obj2 = reduction_metrics.Sum.from_config(sum_obj.get_config())
self.assertEqual(sum_obj2.name, "sum")
self.assertEqual(len(sum_obj2.variables), 1)
self.assertEqual(sum_obj2._dtype, "float32")
def test_unweighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7])
result = sum_obj.result()
self.assertAllClose(result, 16.0, atol=1e-3)
def test_weighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
result = sum_obj.result()
self.assertAllClose(result, 4.0, atol=1e-3)
def test_weighted_nd(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
result = sum_obj.result()
self.assertAllClose(result, 9.0, atol=1e-3)
def test_weighted_nd_broadcast(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 0]])
result = sum_obj.result()
self.assertAllClose(result, 6.0, atol=1e-3)
| SumTest |
python | numba__numba | numba/tests/test_gdb_dwarf.py | {
"start": 393,
"end": 2196
} | class ____(TestCase):
# This runs the tests in numba.tests.gdb, each submodule must contain one
# test class called "Test" and it must contain one test called "test".
# Variation is provided by the module name. The reason this convention exits
# is because gdb tests tend to be line number sensitive (breakpoints etc
# care about this) and doing this prevents constant churn and permits the
# reuse of the existing subprocess_test_runner harness.
_NUMBA_OPT_0_ENV = {'NUMBA_OPT': '0'}
def _gdb_has_numpy(self):
"""Returns True if gdb has NumPy support, False otherwise"""
driver = GdbMIDriver(__file__, debug=False,)
has_numpy = driver.supports_numpy()
driver.quit()
return has_numpy
def _subprocess_test_runner(self, test_mod):
themod = f'numba.tests.gdb.{test_mod}'
self.subprocess_test_runner(test_module=themod,
test_class='Test',
test_name='test',
envvars=self._NUMBA_OPT_0_ENV)
def test_basic(self):
self._subprocess_test_runner('test_basic')
def test_array_arg(self):
self._subprocess_test_runner('test_array_arg')
def test_conditional_breakpoint(self):
self._subprocess_test_runner('test_conditional_breakpoint')
def test_break_on_symbol(self):
self._subprocess_test_runner('test_break_on_symbol')
def test_break_on_symbol_version(self):
self._subprocess_test_runner('test_break_on_symbol_version')
def test_pretty_print(self):
if not self._gdb_has_numpy():
_msg = "Cannot find gdb with NumPy support"
self.skipTest(_msg)
self._subprocess_test_runner('test_pretty_print')
| TestGDBDwarf |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 23592,
"end": 27965
} | class ____(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: At all time there is a stack of states. Initially, the stack contains
#: a single state 'root'. The top of the stack is called "the current state".
#:
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: ``new_state`` can be omitted to signify no state transition.
#: If ``new_state`` is a string, it is pushed on the stack. This ensure
#: the new current state is ``new_state``.
#: If ``new_state`` is a tuple of strings, all of those strings are pushed
#: on the stack and the current state will be the last element of the list.
#: ``new_state`` can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again. Note that if you push while in a combined state, the combined
#: state itself is pushed, and not only the state in which the rule is
#: defined.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
yield from action(self, m)
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
if len(statestack) > 1:
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop, but keep at least one state on the stack
# (random code leading to unexpected pops should
# not allow exceptions)
if abs(new_state) >= len(statestack):
del statestack[1:]
else:
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, f"wrong state def: {new_state!r}"
statetokens = tokendefs[statestack[-1]]
break
else:
# We are here only if all state tokens have been considered
# and there was not a match on any of them.
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Whitespace, '\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
| RegexLexer |
python | pytorch__pytorch | torch/utils/data/sampler.py | {
"start": 584,
"end": 3752
} | class ____(Generic[_T_co]):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an :meth:`__iter__` method, providing a
way to iterate over indices or lists of indices (batches) of dataset elements,
and may provide a :meth:`__len__` method that returns the length of the returned iterators.
Example:
>>> # xdoctest: +SKIP
>>> class AccedingSequenceLengthSampler(Sampler[int]):
>>> def __init__(self, data: List[str]) -> None:
>>> self.data = data
>>>
>>> def __len__(self) -> int:
>>> return len(self.data)
>>>
>>> def __iter__(self) -> Iterator[int]:
>>> sizes = torch.tensor([len(x) for x in self.data])
>>> yield from torch.argsort(sizes).tolist()
>>>
>>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]):
>>> def __init__(self, data: List[str], batch_size: int) -> None:
>>> self.data = data
>>> self.batch_size = batch_size
>>>
>>> def __len__(self) -> int:
>>> return (len(self.data) + self.batch_size - 1) // self.batch_size
>>>
>>> def __iter__(self) -> Iterator[List[int]]:
>>> sizes = torch.tensor([len(x) for x in self.data])
>>> for batch in torch.chunk(torch.argsort(sizes), len(self)):
>>> yield batch.tolist()
.. note:: The :meth:`__len__` method isn't strictly required by
:class:`~torch.utils.data.DataLoader`, but is expected in any
calculation involving the length of a :class:`~torch.utils.data.DataLoader`.
"""
def __iter__(self) -> Iterator[_T_co]:
raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
# raising a `NotImplementedError` will propagate and make the call fail
# where it could have used `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
| Sampler |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 68049,
"end": 72492
} | class ____:
arr1 = np.arange(3)
arr2 = np.arange(3)
expected = np.array(
[[0, 0, 0],
[0, 1, 2],
[0, 2, 4]]
)
assert_array_equal(np.linalg.outer(arr1, arr2), expected)
with assert_raises_regex(
ValueError, "Input arrays must be one-dimensional"
):
np.linalg.outer(arr1[:, np.newaxis], arr2)
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.view(arr.dtype.newbyteorder(native))
sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
@pytest.mark.skipif(
threading.active_count() > 1,
reason="skipping test that uses fork because there are multiple threads")
@pytest.mark.skipif(
NOGIL_BUILD,
reason="Cannot safely use fork in tests on the free-threaded build")
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
XERBLA_OK = 255
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
pytest.skip("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except Exception:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1.]])
np.linalg.lapack_lite.dorgqr(
1, 1, 1, a,
0, # <- invalid value
a, a, 0, 0)
except ValueError as e:
if "DORGQR parameter number 5" in str(e):
# success, reuse error code to mark success as
# FORTRAN STOP returns as success.
os._exit(XERBLA_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
pytest.skip('Numpy xerbla not linked in.')
@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")
@pytest.mark.slow
def test_sdot_bug_8577():
# Regression test that loading certain other libraries does not
# result to wrong results in float32 linear algebra.
#
# There's a bug gh-8577 on OSX that can trigger this, and perhaps
# there are also other situations in which it occurs.
#
# Do the check in a separate process.
bad_libs = ['PyQt5.QtWidgets', 'IPython']
template = textwrap.dedent("""
import sys
{before}
try:
import {bad_lib}
except ImportError:
sys.exit(0)
{after}
x = np.ones(2, dtype=np.float32)
sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)
""")
for bad_lib in bad_libs:
code = template.format(before="import numpy as np", after="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
# Swapped import order
code = template.format(after="import numpy as np", before="",
bad_lib=bad_lib)
subprocess.check_call([sys.executable, "-c", code])
| TestOuter |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-coloring-game.py | {
"start": 191,
"end": 838
} | class ____(object):
def btreeGameWinningMove(self, root, n, x):
"""
:type root: TreeNode
:type n: int
:type x: int
:rtype: bool
"""
def count(node, x, left_right):
if not node:
return 0
left, right = count(node.left, x, left_right), count(node.right, x, left_right)
if node.val == x:
left_right[0], left_right[1] = left, right
return left + right + 1
left_right = [0, 0]
count(root, x, left_right)
blue = max(max(left_right), n-(sum(left_right)+1))
return blue > n-blue
| Solution |
python | pyodide__pyodide | tools/backport.py | {
"start": 6137,
"end": 8441
} | class ____:
"""A section of the changelog for a particular version of Pyodide
Introduced by ### or ##. Ends when there is another line with ### or ##.
header:
Consists of all the lines starting with and the subsection start "###"
line and including all content lines up untile the first line that
starts with -. Generally this will be a heading like "### Packages" plus
one or more empty lines. The first `ChangelogSection` in a
`ChangelogVersion` may have an empty heading.
paragraphs:
The list of paragraphs.
cur_paragraph:
Parser state.
"""
header: list[str] = field(default_factory=list)
paragraphs: list[ChangelogParagraph] = field(default_factory=list)
cur_paragraph: ChangelogParagraph = field(default_factory=ChangelogParagraph)
def get_text(self) -> str:
"""Unparse the subsection"""
header = ""
if self.header:
header = "\n".join(self.header) + "\n"
res = header + "".join(x.get_text() for x in self.paragraphs)
# Special case: if the last entry already ends in a blank line, we don't
# add another one. This keeps the spacing more consistent with the
# backported entries.
if not res.endswith("\n\n"):
res += "\n"
return res
def __bool__(self) -> bool:
return bool(self.header or self.paragraphs or self.cur_paragraph)
def append(self, line: str) -> None:
"""Main parsing logic."""
if line.strip() == "":
if self.cur_paragraph:
self.finish_paragraph()
else:
self.header.append(line)
return
if self.cur_paragraph or line.startswith("-"):
self.cur_paragraph.append(line)
else:
self.header.append(line)
def finish_paragraph(self) -> None:
"""If cur_paragraph is nonempty, add it to entries. Then empty out cur_paragraph"""
if self.cur_paragraph:
self.cur_paragraph.finish_entry()
self.paragraphs.append(self.cur_paragraph)
self.cur_paragraph = ChangelogParagraph()
PrChangelogIndex = namedtuple(
"PrChangelogIndex", ["subsection", "paragraph", "entry", "is_unique"]
)
@dataclass
| ChangelogSection |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 714,
"end": 1618
} | class ____(unittest.TestCase):
def test_it(self):
import types
from venusian import ATTACH_ATTR
self.assertTrue(getattr(wsgiapptest, ATTACH_ATTR))
self.assertIsInstance(wsgiapptest, types.FunctionType)
context = DummyContext()
request = DummyRequest()
result = wsgiapptest(context, request)
self.assertEqual(result, '123')
def test_scanned(self):
from pyramid.config import Configurator
from pyramid.interfaces import IRequest, IView, IViewClassifier
from . import test_integration
config = Configurator()
config.scan(test_integration)
config.commit()
reg = config.registry
view = reg.adapters.lookup(
(IViewClassifier, IRequest, INothing), IView, name=''
)
self.assertEqual(view.__original_view__, wsgiapptest)
| WGSIAppPlusViewConfigTests |
python | pytorch__pytorch | test/package/test_misc.py | {
"start": 620,
"end": 12817
} | class ____(PackageTestCase):
"""Tests for one-off or random functionality. Try not to add to this!"""
def test_file_structure(self):
"""
Tests package's Directory structure representation of a zip file. Ensures
that the returned Directory prints what is expected and filters
inputs/outputs correctly.
"""
buffer = BytesIO()
export_plain = dedent(
"""\
\u251c\u2500\u2500 .data
\u2502 \u251c\u2500\u2500 extern_modules
\u2502 \u251c\u2500\u2500 python_version
\u2502 \u251c\u2500\u2500 serialization_id
\u2502 \u2514\u2500\u2500 version
\u251c\u2500\u2500 main
\u2502 \u2514\u2500\u2500 main
\u251c\u2500\u2500 obj
\u2502 \u2514\u2500\u2500 obj.pkl
\u251c\u2500\u2500 package_a
\u2502 \u251c\u2500\u2500 __init__.py
\u2502 \u2514\u2500\u2500 subpackage.py
\u251c\u2500\u2500 byteorder
\u2514\u2500\u2500 module_a.py
"""
)
export_include = dedent(
"""\
\u251c\u2500\u2500 obj
\u2502 \u2514\u2500\u2500 obj.pkl
\u2514\u2500\u2500 package_a
\u2514\u2500\u2500 subpackage.py
"""
)
import_exclude = dedent(
"""\
\u251c\u2500\u2500 .data
\u2502 \u251c\u2500\u2500 extern_modules
\u2502 \u251c\u2500\u2500 python_version
\u2502 \u251c\u2500\u2500 serialization_id
\u2502 \u2514\u2500\u2500 version
\u251c\u2500\u2500 main
\u2502 \u2514\u2500\u2500 main
\u251c\u2500\u2500 obj
\u2502 \u2514\u2500\u2500 obj.pkl
\u251c\u2500\u2500 package_a
\u2502 \u251c\u2500\u2500 __init__.py
\u2502 \u2514\u2500\u2500 subpackage.py
\u251c\u2500\u2500 byteorder
\u2514\u2500\u2500 module_a.py
"""
)
with PackageExporter(buffer) as he:
import module_a
import package_a
import package_a.subpackage
obj = package_a.subpackage.PackageASubpackageObject()
he.intern("**")
he.save_module(module_a.__name__)
he.save_module(package_a.__name__)
he.save_pickle("obj", "obj.pkl", obj)
he.save_text("main", "main", "my string")
buffer.seek(0)
hi = PackageImporter(buffer)
file_structure = hi.file_structure()
# remove first line from testing because WINDOW/iOS/Unix treat the buffer differently
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
export_plain,
)
file_structure = hi.file_structure(include=["**/subpackage.py", "**/*.pkl"])
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
export_include,
)
file_structure = hi.file_structure(exclude="**/*.storage")
self.assertEqual(
dedent("\n".join(str(file_structure).split("\n")[1:])),
import_exclude,
)
def test_loaders_that_remap_files_work_ok(self):
from importlib.abc import MetaPathFinder
from importlib.machinery import SourceFileLoader
from importlib.util import spec_from_loader
class LoaderThatRemapsModuleA(SourceFileLoader):
def get_filename(self, name):
result = super().get_filename(name)
if name == "module_a":
return os.path.join(
os.path.dirname(result), "module_a_remapped_path.py"
)
else:
return result
class FinderThatRemapsModuleA(MetaPathFinder):
def find_spec(self, fullname, path, target):
"""Try to find the original spec for module_a using all the
remaining meta_path finders."""
if fullname != "module_a":
return None
spec = None
for finder in sys.meta_path:
if finder is self:
continue
if hasattr(finder, "find_spec"):
spec = finder.find_spec(fullname, path, target=target)
elif hasattr(finder, "load_module"):
spec = spec_from_loader(fullname, finder)
if spec is not None:
break
assert spec is not None and isinstance(spec.loader, SourceFileLoader)
spec.loader = LoaderThatRemapsModuleA(
spec.loader.name, spec.loader.path
)
return spec
sys.meta_path.insert(0, FinderThatRemapsModuleA())
# clear it from sys.modules so that we use the custom finder next time
# it gets imported
sys.modules.pop("module_a", None)
try:
buffer = BytesIO()
with PackageExporter(buffer) as he:
import module_a
he.intern("**")
he.save_module(module_a.__name__)
buffer.seek(0)
hi = PackageImporter(buffer)
self.assertTrue("remapped_path" in hi.get_source("module_a"))
finally:
# pop it again to ensure it does not mess up other tests
sys.modules.pop("module_a", None)
sys.meta_path.pop(0)
def test_python_version(self):
"""
Tests that the current python version is stored in the package and is available
via PackageImporter's python_version() method.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
from package_a.test_module import SimpleTest
he.intern("**")
obj = SimpleTest()
he.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
hi = PackageImporter(buffer)
self.assertEqual(hi.python_version(), platform.python_version())
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_python_version_from_package(self):
"""Tests loading a package with a python version embedded"""
importer1 = PackageImporter(
f"{Path(__file__).parent}/package_e/test_nn_module.pt"
)
self.assertEqual(importer1.python_version(), "3.9.7")
def test_file_structure_has_file(self):
"""
Test Directory's has_file() method.
"""
buffer = BytesIO()
with PackageExporter(buffer) as he:
import package_a.subpackage
he.intern("**")
obj = package_a.subpackage.PackageASubpackageObject()
he.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
importer = PackageImporter(buffer)
file_structure = importer.file_structure()
self.assertTrue(file_structure.has_file("package_a/subpackage.py"))
self.assertFalse(file_structure.has_file("package_a/subpackage"))
def test_exporter_content_lists(self):
"""
Test content list API for PackageExporter's contained modules.
"""
with PackageExporter(BytesIO()) as he:
import package_b
he.extern("package_b.subpackage_1")
he.mock("package_b.subpackage_2")
he.intern("**")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
self.assertEqual(he.externed_modules(), ["package_b.subpackage_1"])
self.assertEqual(he.mocked_modules(), ["package_b.subpackage_2"])
self.assertEqual(
he.interned_modules(),
["package_b", "package_b.subpackage_0.subsubpackage_0"],
)
self.assertEqual(he.get_rdeps("package_b.subpackage_2"), ["package_b"])
with self.assertRaises(PackagingError):
with PackageExporter(BytesIO()) as he:
import package_b
he.deny("package_b")
he.save_pickle("obj", "obj.pkl", package_b.PackageBObject(["a"]))
self.assertEqual(he.denied_modules(), ["package_b"])
def test_is_from_package(self):
"""is_from_package should work for objects and modules"""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.import_module("package_a.subpackage")
loaded_obj = pi.load_pickle("obj", "obj.pkl")
self.assertFalse(is_from_package(package_a.subpackage))
self.assertTrue(is_from_package(mod))
self.assertFalse(is_from_package(obj))
self.assertTrue(is_from_package(loaded_obj))
def test_inspect_class(self):
"""Should be able to retrieve source for a packaged class."""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
packaged_class = pi.import_module(
"package_a.subpackage"
).PackageASubpackageObject
regular_class = package_a.subpackage.PackageASubpackageObject
packaged_src = inspect.getsourcelines(packaged_class)
regular_src = inspect.getsourcelines(regular_class)
self.assertEqual(packaged_src, regular_src)
def test_dunder_package_present(self):
"""
The attribute '__torch_package__' should be populated on imported modules.
"""
import package_a.subpackage
buffer = BytesIO()
obj = package_a.subpackage.PackageASubpackageObject()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", obj)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.import_module("package_a.subpackage")
self.assertTrue(hasattr(mod, "__torch_package__"))
def test_dunder_package_works_from_package(self):
"""
The attribute '__torch_package__' should be accessible from within
the module itself, so that packaged code can detect whether it's
being used in a packaged context or not.
"""
import package_a.use_dunder_package as mod
buffer = BytesIO()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_module(mod.__name__)
buffer.seek(0)
pi = PackageImporter(buffer)
imported_mod = pi.import_module(mod.__name__)
self.assertTrue(imported_mod.is_from_package())
self.assertFalse(mod.is_from_package())
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_std_lib_sys_hackery_checks(self):
"""
The standard library performs sys.module assignment hackery which
causes modules who do this hackery to fail on import. See
https://github.com/pytorch/pytorch/issues/57490 for more information.
"""
if sys.version_info < (3, 13):
import package_a.std_sys_module_hacks as std_sys_module_hacks
else:
import package_a.std_sys_module_hacks_3_13 as std_sys_module_hacks
buffer = BytesIO()
mod = std_sys_module_hacks.Module()
with PackageExporter(buffer) as pe:
pe.intern("**")
pe.save_pickle("obj", "obj.pkl", mod)
buffer.seek(0)
pi = PackageImporter(buffer)
mod = pi.load_pickle("obj", "obj.pkl")
mod()
if __name__ == "__main__":
run_tests()
| TestMisc |
python | PyCQA__pylint | tests/functional/m/mapping_context.py | {
"start": 904,
"end": 1127
} | class ____:
kwargs = None
def get_kwargs(self):
return self.kwargs
def run(self, **kwargs):
print(kwargs)
def dispatch(self):
kws = self.get_kwargs()
self.run(**kws)
| SomeMixin |
python | numpy__numpy | numpy/_core/tests/test_scalar_methods.py | {
"start": 4226,
"end": 5118
} | class ____:
@pytest.mark.parametrize("str_value", ["inf", "nan"])
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_special(self, code: str, str_value: str) -> None:
cls = np.dtype(code).type
value = cls(str_value)
assert not value.is_integer()
@pytest.mark.parametrize(
"code", np.typecodes["Float"] + np.typecodes["AllInteger"]
)
def test_true(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
for value in float_array:
assert value.is_integer()
@pytest.mark.parametrize("code", np.typecodes["Float"])
def test_false(self, code: str) -> None:
float_array = np.arange(-5, 5).astype(code)
float_array *= 1.1
for value in float_array:
if value == 0:
continue
assert not value.is_integer()
| TestIsInteger |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/inference.py | {
"start": 330,
"end": 568
} | class ____(NamedTuple):
"""The information about an input that can be inferred from the function signature."""
name: str
annotation: Any
description: Optional[str]
default_value: Any = NoValueSentinel
| InferredInputProps |
python | lxml__lxml | src/lxml/tests/test_sax.py | {
"start": 11215,
"end": 12632
} | class ____(ContentHandler):
"""A SAX content handler that just stores the events"""
def __init__(self):
self.sax_events = []
super().__init__()
def startDocument(self):
self.sax_events.append(('startDocument',))
def endDocument(self):
self.sax_events.append(('endDocument',))
def startPrefixMapping(self, prefix, uri):
self.sax_events.append(('startPrefixMapping', prefix, uri))
def endPrefixMapping(self, prefix):
self.sax_events.append(('endPrefixMapping', prefix))
def startElement(self, name, attrs):
self.sax_events.append(('startElement', name, dict(attrs)))
def endElement(self, name):
self.sax_events.append(('endElement', name))
def startElementNS(self, name, qname, attrs):
self.sax_events.append(('startElementNS', name, qname, attrs._qnames))
def endElementNS(self, name, qname):
self.sax_events.append(('endElementNS', name, qname))
def characters(self, content):
self.sax_events.append(('characters', content))
def ignorableWhitespace(self, whitespace):
self.sax_events.append(('ignorableWhitespace', whitespace))
def processingInstruction(self, target, data):
self.sax_events.append(('processingInstruction', target, data))
def skippedEntity(self, name):
self.sax_events.append(('skippedEntity', name))
| SimpleContentHandler |
python | pallets__click | examples/aliases/aliases.py | {
"start": 46,
"end": 887
} | class ____:
"""The config in this example only holds aliases."""
def __init__(self):
self.path = os.getcwd()
self.aliases = {}
def add_alias(self, alias, cmd):
self.aliases.update({alias: cmd})
def read_config(self, filename):
parser = configparser.RawConfigParser()
parser.read([filename])
try:
self.aliases.update(parser.items("aliases"))
except configparser.NoSectionError:
pass
def write_config(self, filename):
parser = configparser.RawConfigParser()
parser.add_section("aliases")
for key, value in self.aliases.items():
parser.set("aliases", key, value)
with open(filename, "wb") as file:
parser.write(file)
pass_config = click.make_pass_decorator(Config, ensure=True)
| Config |
python | doocs__leetcode | solution/0300-0399/0387.First Unique Character in a String/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def firstUniqChar(self, s: str) -> int:
cnt = Counter(s)
for i, c in enumerate(s):
if cnt[c] == 1:
return i
return -1
| Solution |
python | dask__distributed | distributed/deploy/tests/test_spec_cluster.py | {
"start": 524,
"end": 12400
} | class ____(Worker):
pass
worker_spec = {
0: {"cls": "dask.distributed.Worker", "options": {"nthreads": 1}},
1: {"cls": Worker, "options": {"nthreads": 2}},
"my-worker": {"cls": MyWorker, "options": {"nthreads": 3}},
}
scheduler = {"cls": Scheduler, "options": {"dashboard_address": ":0"}}
@gen_test()
async def test_specification():
async with SpecCluster(
workers=worker_spec, scheduler=scheduler, asynchronous=True
) as cluster:
assert cluster.worker_spec == worker_spec
assert len(cluster.workers) == 3
assert set(cluster.workers) == set(worker_spec)
assert isinstance(cluster.workers[0], Worker)
assert isinstance(cluster.workers[1], Worker)
assert isinstance(cluster.workers["my-worker"], MyWorker)
assert cluster.workers[0].state.nthreads == 1
assert cluster.workers[1].state.nthreads == 2
assert cluster.workers["my-worker"].state.nthreads == 3
async with Client(cluster, asynchronous=True) as client:
result = await client.submit(lambda x: x + 1, 10)
assert result == 11
for name in cluster.workers:
assert cluster.workers[name].name == name
def test_spec_sync(loop):
worker_spec = {
0: {"cls": Worker, "options": {"nthreads": 1}},
1: {"cls": Worker, "options": {"nthreads": 2}},
"my-worker": {"cls": MyWorker, "options": {"nthreads": 3}},
}
with SpecCluster(workers=worker_spec, scheduler=scheduler, loop=loop) as cluster:
assert cluster.worker_spec == worker_spec
assert len(cluster.workers) == 3
assert set(cluster.workers) == set(worker_spec)
assert isinstance(cluster.workers[0], Worker)
assert isinstance(cluster.workers[1], Worker)
assert isinstance(cluster.workers["my-worker"], MyWorker)
assert cluster.workers[0].state.nthreads == 1
assert cluster.workers[1].state.nthreads == 2
assert cluster.workers["my-worker"].state.nthreads == 3
with Client(cluster, loop=loop) as client:
assert cluster.loop is cluster.scheduler.loop
assert cluster.loop is client.loop
result = client.submit(lambda x: x + 1, 10).result()
assert result == 11
def test_loop_started_in_constructor(cleanup):
# test that SpecCluster.__init__ starts a loop in another thread
cluster = SpecCluster(worker_spec, scheduler=scheduler, loop=None)
try:
assert cluster.loop.asyncio_loop.is_running()
finally:
with cluster:
pass
@gen_test()
async def test_repr():
worker = {"cls": Worker, "options": {"nthreads": 1}}
class MyCluster(SpecCluster):
pass
async with MyCluster(
asynchronous=True, scheduler=scheduler, worker=worker
) as cluster:
assert "MyCluster" in str(cluster)
@gen_test()
async def test_scale():
worker = {"cls": Worker, "options": {"nthreads": 1}}
async with SpecCluster(
asynchronous=True, scheduler=scheduler, worker=worker
) as cluster:
assert not cluster.workers
assert not cluster.worker_spec
# Scale up
cluster.scale(2)
assert not cluster.workers
assert cluster.worker_spec
await cluster
assert len(cluster.workers) == 2
# Scale down
cluster.scale(1)
assert len(cluster.workers) == 2
await cluster
assert len(cluster.workers) == 1
# Can use with await
await cluster.scale(2)
await cluster
assert len(cluster.workers) == 2
@pytest.mark.slow
@gen_test()
async def test_adaptive_killed_worker():
with dask.config.set({"distributed.deploy.lost-worker-timeout": 0.1}):
async with SpecCluster(
asynchronous=True,
worker={"cls": Nanny, "options": {"nthreads": 1}},
scheduler=scheduler,
) as cluster:
async with Client(cluster, asynchronous=True) as client:
# Scale up a cluster with 1 worker.
cluster.adapt(minimum=1, maximum=1)
while not cluster.workers:
await asyncio.sleep(0.01)
future = client.submit(sleep, 0.1)
# Kill the only worker.
[worker_id] = cluster.workers
await cluster.workers[worker_id].kill()
# Wait for the worker to re-spawn and finish sleeping.
await future
@gen_test()
async def test_unexpected_closed_worker():
worker = {"cls": Worker, "options": {"nthreads": 1}}
with dask.config.set({"distributed.deploy.lost-worker-timeout": "10ms"}):
async with SpecCluster(
asynchronous=True, scheduler=scheduler, worker=worker
) as cluster:
assert not cluster.workers
assert not cluster.worker_spec
# Scale up
cluster.scale(2)
assert not cluster.workers
assert cluster.worker_spec
await cluster
assert len(cluster.workers) == 2
# Close one
await list(cluster.workers.values())[0].close()
start = time()
while len(cluster.workers) > 1: # wait for messages to flow around
await asyncio.sleep(0.01)
assert time() < start + 2
assert len(cluster.workers) == 1
assert len(cluster.worker_spec) == 2
await cluster
assert len(cluster.workers) == 2
@gen_test(timeout=60)
async def test_restart():
"""Regression test for https://github.com/dask/distributed/issues/3062"""
worker = {"cls": Nanny, "options": {"nthreads": 1}}
async with SpecCluster(
asynchronous=True, scheduler=scheduler, worker=worker
) as cluster:
async with Client(cluster, asynchronous=True) as client:
cluster.scale(2)
await cluster
assert len(cluster.workers) == 2
await client.restart()
while len(cluster.workers) < 2:
await asyncio.sleep(0.01)
@gen_test()
async def test_broken_worker():
class BrokenWorkerException(Exception):
pass
class BrokenWorker(Worker):
def __await__(self):
async def _():
raise BrokenWorkerException("Worker Broken")
return _().__await__()
cluster = SpecCluster(
asynchronous=True,
workers={"good": {"cls": Worker}, "bad": {"cls": BrokenWorker}},
scheduler=scheduler,
)
with pytest.raises(BrokenWorkerException, match=r"Worker Broken"):
async with cluster:
pass
@pytest.mark.skipif(WINDOWS, reason="HTTP Server doesn't close out")
def test_spec_close_clusters(loop):
workers = {0: {"cls": Worker}}
cluster = SpecCluster(workers=workers, scheduler=scheduler, loop=loop)
assert cluster in SpecCluster._instances
close_clusters()
assert cluster.status == Status.closed
@gen_test()
async def test_new_worker_spec():
class MyCluster(SpecCluster):
def new_worker_spec(self):
i = len(self.worker_spec)
return {i: {"cls": Worker, "options": {"nthreads": i + 1}}}
async with MyCluster(asynchronous=True, scheduler=scheduler) as cluster:
cluster.scale(3)
for i in range(3):
assert cluster.worker_spec[i]["options"]["nthreads"] == i + 1
@gen_test()
async def test_nanny_port():
workers = {0: {"cls": Nanny, "options": {"port": 9200}}}
async with SpecCluster(scheduler=scheduler, workers=workers, asynchronous=True):
pass
@gen_test()
async def test_spec_process():
proc = ProcessInterface()
assert proc.status == Status.created
await proc
assert proc.status == Status.running
await proc.close()
assert proc.status == Status.closed
@gen_test()
async def test_get_logs():
worker = {"cls": Worker, "options": {"nthreads": 1}}
async with SpecCluster(
asynchronous=True, scheduler=scheduler, worker=worker
) as cluster:
cluster.scale(2)
await cluster
logs = await cluster.get_logs()
assert isinstance(logs, dict)
assert all(isinstance(log, str) for log in logs)
assert is_valid_xml("<div>" + logs._repr_html_() + "</div>")
assert "Scheduler" in logs
for worker in cluster.scheduler.workers:
assert worker in logs
assert "Registered" in str(logs)
logs = await cluster.get_logs(cluster=True, scheduler=False, workers=False)
assert list(logs) == ["Cluster"]
logs = await cluster.get_logs(cluster=False, scheduler=True, workers=False)
assert list(logs) == ["Scheduler"]
logs = await cluster.get_logs(cluster=False, scheduler=False, workers=False)
assert list(logs) == []
logs = await cluster.get_logs(cluster=False, scheduler=False, workers=True)
assert set(logs) == set(cluster.scheduler.workers)
w = toolz.first(cluster.scheduler.workers)
logs = await cluster.get_logs(cluster=False, scheduler=False, workers=[w])
assert set(logs) == {w}
@gen_test()
async def test_logs_deprecated():
async with SpecCluster(asynchronous=True, scheduler=scheduler) as cluster:
with pytest.warns(FutureWarning, match="get_logs"):
logs = await cluster.logs()
assert logs["Scheduler"]
@gen_test()
async def test_scheduler_info():
async with SpecCluster(
workers=worker_spec, scheduler=scheduler, asynchronous=True
) as cluster:
assert (
cluster.scheduler_info["id"] == cluster.scheduler.id
) # present at startup
start = time() # wait for all workers
while len(cluster.scheduler_info["workers"]) < len(cluster.workers):
await asyncio.sleep(0.01)
assert time() < start + 1
assert set(cluster.scheduler.identity()["workers"]) == set(
cluster.scheduler_info["workers"]
)
assert (
cluster.scheduler.identity()["services"]
== cluster.scheduler_info["services"]
)
assert len(cluster.scheduler_info["workers"]) == len(cluster.workers)
@gen_test()
async def test_dashboard_link():
async with SpecCluster(
workers=worker_spec,
scheduler={"cls": Scheduler, "options": {"dashboard_address": ":12345"}},
asynchronous=True,
) as cluster:
assert "12345" in cluster.dashboard_link
@gen_test()
async def test_widget():
async with SpecCluster(
workers=worker_spec,
scheduler=scheduler,
asynchronous=True,
worker={"cls": Worker, "options": {"nthreads": 1}},
) as cluster:
start = time() # wait for all workers
while len(cluster.scheduler_info["workers"]) < len(cluster.worker_spec):
await asyncio.sleep(0.01)
assert time() < start + 1
cluster.scale(5)
assert "3 / 5" in cluster._scaling_status()
@gen_test()
async def test_scale_cores_memory():
async with SpecCluster(
scheduler=scheduler,
worker={"cls": Worker, "options": {"nthreads": 1}},
asynchronous=True,
) as cluster:
cluster.scale(cores=2)
assert len(cluster.worker_spec) == 2
with pytest.raises(ValueError) as info:
cluster.scale(memory="5GB")
assert "memory" in str(info.value)
@gen_test()
async def test_ProcessInterfaceValid():
async with SpecCluster(
scheduler=scheduler, worker={"cls": ProcessInterface}, asynchronous=True
) as cluster:
cluster.scale(2)
await cluster
assert len(cluster.worker_spec) == len(cluster.workers) == 2
cluster.scale(1)
await cluster
assert len(cluster.worker_spec) == len(cluster.workers) == 1
| MyWorker |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 30004,
"end": 30750
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a work queue."""
name: Optional[str] = Field(default=None)
description: Optional[str] = Field(default=None)
is_paused: bool = Field(
default=False, description="Whether or not the work queue is paused."
)
concurrency_limit: Optional[NonNegativeInteger] = Field(default=None)
priority: Optional[PositiveInteger] = Field(
None, description="The queue's priority."
)
last_polled: Optional[DateTime] = Field(default=None)
# DEPRECATED
filter: Optional[objects.QueueFilter] = Field(
None,
description="DEPRECATED: Filter criteria for the work queue.",
deprecated=True,
)
| WorkQueueUpdate |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_bigquery_to_postgres.py | {
"start": 1781,
"end": 10809
} | class ____:
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_postgres.bigquery_get_data")
@mock.patch.object(BigQueryToPostgresOperator, "bigquery_hook", new_callable=mock.PropertyMock)
@mock.patch.object(BigQueryToPostgresOperator, "postgres_hook", new_callable=mock.PropertyMock)
def test_execute_good_request_to_bq(self, mock_pg_hook, mock_bq_hook, mock_bigquery_get_data):
operator = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=TEST_DESTINATION_TABLE,
replace=False,
)
mock_bigquery_get_data.return_value = [[("row1", "val1")], [("row2", "val2")]]
mock_pg = mock.MagicMock()
mock_pg_hook.return_value = mock_pg
mock_bq = mock.MagicMock()
mock_bq.project_id = TEST_PROJECT
mock_bq_hook.return_value = mock_bq
operator.execute(context=mock.MagicMock())
mock_bigquery_get_data.assert_called_once_with(
operator.log,
TEST_DATASET,
TEST_TABLE_ID,
mock_bq,
operator.batch_size,
operator.selected_fields,
)
assert mock_pg.insert_rows.call_count == 2
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_postgres.bigquery_get_data")
@mock.patch.object(BigQueryToPostgresOperator, "bigquery_hook", new_callable=mock.PropertyMock)
@mock.patch.object(BigQueryToPostgresOperator, "postgres_hook", new_callable=mock.PropertyMock)
def test_execute_good_request_to_bq_with_replace(
self,
mock_pg_hook,
mock_bq_hook,
mock_bigquery_get_data,
):
operator = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=TEST_DESTINATION_TABLE,
replace=True,
selected_fields=["col_1", "col_2"],
replace_index=["col_1"],
)
mock_bigquery_get_data.return_value = [[("only_row", "val")]]
mock_pg = mock.MagicMock()
mock_pg_hook.return_value = mock_pg
mock_bq = mock.MagicMock()
mock_bq.project_id = TEST_PROJECT
mock_bq_hook.return_value = mock_bq
operator.execute(context=mock.MagicMock())
mock_bigquery_get_data.assert_called_once_with(
operator.log,
TEST_DATASET,
TEST_TABLE_ID,
mock_bq,
operator.batch_size,
["col_1", "col_2"],
)
mock_pg.insert_rows.assert_called_once_with(
table=TEST_DESTINATION_TABLE,
rows=[("only_row", "val")],
target_fields=["col_1", "col_2"],
replace=True,
commit_every=operator.batch_size,
replace_index=["col_1"],
)
@pytest.mark.parametrize(
("selected_fields", "replace_index"),
[(None, None), (["col_1, col_2"], None), (None, ["col_1"])],
)
def test_init_raises_exception_if_replace_is_true_and_missing_params(
self, selected_fields, replace_index
):
error_msg = "PostgreSQL ON CONFLICT upsert syntax requires column names and a unique index."
with pytest.raises(ValueError, match=error_msg):
_ = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=TEST_DESTINATION_TABLE,
replace=True,
selected_fields=selected_fields,
replace_index=replace_index,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id"
)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_postgres.register_adapter")
def test_adapters_to_json_registered(self, mock_register_adapter, mock_get_creds, mock_get_client):
mock_get_creds.return_value = (None, TEST_PROJECT)
client = MagicMock()
client.list_rows.return_value = []
mock_get_client.return_value = client
operator = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=TEST_DESTINATION_TABLE,
replace=False,
)
operator.postgres_hook
mock_register_adapter.assert_any_call(list, Json)
mock_register_adapter.assert_any_call(dict, Json)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_postgres.PostgresHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_get_openlineage_facets_on_complete_no_selected_fields(self, mock_bq_hook, mock_postgres_hook):
mock_bq_client = MagicMock()
mock_bq_client.get_table.return_value = _make_bq_table(["id", "name", "value"])
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="postgres", authority="localhost:5432", database="postgresdb")
mock_postgres_hook.get_openlineage_database_info.return_value = db_info
mock_postgres_hook.get_openlineage_default_schema.return_value = "postgres-schema"
mock_postgres_hook.return_value = mock_postgres_hook
op = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name="destination",
selected_fields=None,
database="postgresdb",
)
op.bigquery_hook = mock_bq_hook
op.bigquery_hook.project_id = TEST_PROJECT
op.postgres_hook = mock_postgres_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(task_instance=MagicMock())
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert input_ds.name == f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name", "value"}
output_ds = result.outputs[0]
assert output_ds.namespace == "postgres://localhost:5432"
assert output_ds.name == "postgresdb.postgres-schema.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name", "value"}
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_postgres.PostgresHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_get_openlineage_facets_on_complete_selected_fields(self, mock_bq_hook, mock_postgres_hook):
mock_bq_client = MagicMock()
mock_bq_client.get_table.return_value = _make_bq_table(["id", "name", "value"])
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="postgres", authority="localhost:5432", database="postgresdb")
mock_postgres_hook.get_openlineage_database_info.return_value = db_info
mock_postgres_hook.get_openlineage_default_schema.return_value = "postgres-schema"
mock_postgres_hook.return_value = mock_postgres_hook
op = BigQueryToPostgresOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name="destination",
selected_fields=["id", "name"],
database="postgresdb",
)
op.bigquery_hook = mock_bq_hook
op.bigquery_hook.project_id = TEST_PROJECT
op.postgres_hook = mock_postgres_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(task_instance=MagicMock())
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert input_ds.name == f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name"}
output_ds = result.outputs[0]
assert output_ds.namespace == "postgres://localhost:5432"
assert output_ds.name == "postgresdb.postgres-schema.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name"}
| TestBigQueryToPostgresOperator |
python | django__django | django/contrib/staticfiles/handlers.py | {
"start": 2721,
"end": 4043
} | class ____(StaticFilesHandlerMixin, ASGIHandler):
"""
ASGI application which wraps another and intercepts requests for static
files, passing them off to Django's static file serving.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
async def __call__(self, scope, receive, send):
# Only even look at HTTP requests
if scope["type"] == "http" and self._should_handle(scope["path"]):
# Serve static content
# (the one thing super() doesn't do is __call__, apparently)
return await super().__call__(scope, receive, send)
# Hand off to the main app
return await self.application(scope, receive, send)
async def get_response_async(self, request):
response = await super().get_response_async(request)
response._resource_closers.append(request.close)
# FileResponse is not async compatible.
if response.streaming and not response.is_async:
_iterator = response.streaming_content
async def awrapper():
for part in await sync_to_async(list)(_iterator):
yield part
response.streaming_content = awrapper()
return response
| ASGIStaticFilesHandler |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 9828,
"end": 10452
} | class ____(BaseModel):
"""
Current clustering distribution for the collection
"""
peer_id: int = Field(..., description="ID of this peer")
shard_count: int = Field(..., description="Total number of shards")
local_shards: List["LocalShardInfo"] = Field(..., description="Local shards")
remote_shards: List["RemoteShardInfo"] = Field(..., description="Remote shards")
shard_transfers: List["ShardTransferInfo"] = Field(..., description="Shard transfers")
resharding_operations: Optional[List["ReshardingInfo"]] = Field(default=None, description="Resharding operations")
| CollectionClusterInfo |
python | cookiecutter__cookiecutter | cookiecutter/environment.py | {
"start": 214,
"end": 2003
} | class ____:
"""Mixin providing sane loading of extensions specified in a given context.
The context is being extracted from the keyword arguments before calling
the next parent class in line of the child.
"""
def __init__(self, *, context: dict[str, Any] | None = None, **kwargs: Any) -> None:
"""Initialize the Jinja2 Environment object while loading extensions.
Does the following:
1. Establishes default_extensions (currently just a Time feature)
2. Reads extensions set in the cookiecutter.json _extensions key.
3. Attempts to load the extensions. Provides useful error if fails.
"""
context = context or {}
default_extensions = [
'cookiecutter.extensions.JsonifyExtension',
'cookiecutter.extensions.RandomStringExtension',
'cookiecutter.extensions.SlugifyExtension',
'cookiecutter.extensions.TimeExtension',
'cookiecutter.extensions.UUIDExtension',
]
extensions = default_extensions + self._read_extensions(context)
try:
super().__init__(extensions=extensions, **kwargs) # type: ignore[call-arg]
except ImportError as err:
msg = f'Unable to load extension: {err}'
raise UnknownExtension(msg) from err
def _read_extensions(self, context: dict[str, Any]) -> list[str]:
"""Return list of extensions as str to be passed on to the Jinja2 env.
If context does not contain the relevant info, return an empty
list instead.
"""
try:
extensions = context['cookiecutter']['_extensions']
except KeyError:
return []
else:
return [str(ext) for ext in extensions]
| ExtensionLoaderMixin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/clip_ops_test.py | {
"start": 1294,
"end": 21702
} | class ____(test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Scalar]
def testClipByValue0Type(self):
for dtype in [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.bfloat16,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = 2
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Scalar]
def testClipByValue1Type(self):
for dtype in [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.bfloat16,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [4, 4, 4]]
clip_value_min = constant_op.constant(
[2, 2, 2, 3, 3, 3], shape=[2, 3], dtype=dtype)
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Scalar, Tensor]
def testClipByValue2Type(self):
for dtype in [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.bfloat16,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[4, 4, 4], [4, 5, 6]]
clip_value_min = 4
clip_value_max = constant_op.constant(
[6, 6, 6, 6, 6, 6], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# [Tensor, Tensor, Tensor]
def testClipByValue3Type(self):
for dtype in [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.bfloat16,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.uint8,
]:
with self.cached_session():
x = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
np_ans = [[2, 2, 3], [5, 5, 6]]
clip_value_min = constant_op.constant(
[2, 2, 2, 5, 5, 5], shape=[2, 3], dtype=dtype)
clip_value_max = constant_op.constant(
[5, 5, 5, 7, 7, 7], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByValueGradient(self):
def grad(x, y, z, clip_fn):
x = constant_op.constant(x, dtype=dtypes.float32)
y = constant_op.constant(y, dtype=dtypes.float32)
z = constant_op.constant(z, dtype=dtypes.float32)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
tape.watch(z)
output = clip_fn(x, y, z)
return tape.gradient(output, [x, y, z])
for f in (clip_ops.clip_by_value, gen_math_ops._clip_by_value):
with self.subTest(f=f):
# Input: [Scalar, Scalar, Scalar]
xg, yg, zg = grad(0, -1, 1, clip_fn=f)
self.assertEqual(self.evaluate(xg), 1)
self.assertEqual(self.evaluate(yg), 0)
self.assertEqual(self.evaluate(zg), 0)
# Input: [Scalar, Scalar, Scalar]
xg, yg, zg = grad(2, -1, 1, clip_fn=f)
self.assertEqual(self.evaluate(xg), 0)
self.assertEqual(self.evaluate(yg), 0)
self.assertEqual(self.evaluate(zg), 1)
# Input: [Vector, Scalar, Scalar]
xg, yg, zg = grad([0, -2, 2, -2], -1, 1, clip_fn=f)
self.assertAllEqual(self.evaluate(xg), [1, 0, 0, 0])
self.assertEqual(self.evaluate(yg), 2)
self.assertEqual(self.evaluate(zg), 1)
# Input: [Vector, Vector, Scalar]
xg, yg, zg = grad([-1, -2, 0, 2], [-2, -1, -3, 0], 1, clip_fn=f)
self.assertAllEqual(self.evaluate(xg), [1, 0, 1, 0])
self.assertAllEqual(self.evaluate(yg), [0, 1, 0, 0])
self.assertEqual(self.evaluate(zg), 1)
# Input: [Vector, Vector, Vector]
xg, yg, zg = grad(
[-1, -2, 0, 2], [-2, -1, -3, 0], [1, 2, -1, 1], clip_fn=f
)
self.assertAllEqual(self.evaluate(xg), [1, 0, 0, 0])
self.assertAllEqual(self.evaluate(yg), [0, 1, 0, 0])
self.assertAllEqual(self.evaluate(zg), [0, 0, 1, 1])
# Only test the following with `clip_ops.clip_by_value`, as
# `gen_math_ops._clip_by_value` requires the min and max values to be
# scalar or the same shape as the input.
# Input: [Matrix, Vector, Matrix]
xg, yg, zg = grad([[-2, 3], [2, -1]], [-1, -2], [[1, 2], [3, 4]],
clip_fn=clip_ops.clip_by_value)
self.assertAllEqual(self.evaluate(xg), [[0, 0], [1, 1]])
self.assertAllEqual(self.evaluate(yg), [1, 0])
self.assertAllEqual(self.evaluate(zg), [[0, 1], [0, 0]])
def testClipByValueBadShape(self):
with self.session():
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, -clip, clip) # pylint: disable=invalid-unary-operand-type
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_value(x, 1.0, clip)
def testClipByValueNonFinite(self):
# TODO(b/78016351): Enable test on GPU once the bug is fixed.
with self.cached_session():
x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def _testClipIndexedSlicesByValue(self, values, indices, shape,
clip_value_min, clip_value_max, expected):
with self.session():
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)
# IndexedSlices mode
indexed_slices = indexed_slices_lib.IndexedSlices(values, indices, shape)
clipped = clip_ops.clip_by_value(indexed_slices, clip_value_min,
clip_value_max)
# clipped should be IndexedSlices
self.assertIsInstance(clipped, indexed_slices_lib.IndexedSlices)
self.assertAllClose(clipped.values, expected)
def testClipByValueWithIndexedSlicesClipped(self):
values = [[[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]],
[[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]]
indices = [2, 6]
shape = [10, 2, 3]
# [-2.0, 2.0]
self._testClipIndexedSlicesByValue(values, indices, shape, -2.0, 2.0,
[[[-2.0, 0.0, 0.0], [2.0, 0.0, 0.0]],
[[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]])
# [1.0, 2.0]
self._testClipIndexedSlicesByValue(values, indices, shape, 1.0, 2.0,
[[[1.0, 1.0, 1.0], [2.0, 1.0, 1.0]],
[[1.0, 2.0, 1.0], [1.0, 1.0, 1.0]]])
# [-2.0, -1.0]
self._testClipIndexedSlicesByValue(
values, indices, shape, -2.0, -1.0,
[[[-2.0, -1.0, -1.0], [-1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]])
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans_tensor = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
self.assertAllClose(np_ans, tf_ans_tensor)
@test_util.run_deprecated_v1
def testClipByNormGradientZeros(self):
with self.session():
x = array_ops.zeros([3])
b = clip_ops.clip_by_norm(x, 1.)
grad, = gradients_impl.gradients(b, x)
self.assertAllEqual(grad, [1., 1., 1.])
def testClipByNormBadShape(self):
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3, 1])
# Use a nonsensical shape.
clip = constant_op.constant([1.0, 2.0])
with self.assertRaises(ValueError):
_ = clip_ops.clip_by_norm(x, clip)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
# ClipByGlobalNorm tests
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1])
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = constant_op.constant(4.0)
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1])
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[2])
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
@test_util.run_deprecated_v1
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = indexed_slices_lib.IndexedSlices(
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1].values)
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
slices = indexed_slices_lib.IndexedSlices(
constant_op.constant([1.0]),
constant_op.constant([0]),
dense_shape=dense_shape)
ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.session():
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1])
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.session():
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1])
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormInf(self):
# Expect all NaNs when global norm is inf.
with self.session():
x0 = constant_op.constant([-2.0, 0.0, np.inf, 4.0, 0.0, 0.0],
shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
clip_norm = 6.0
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = self.evaluate(ans[0])
tf_ans_2 = self.evaluate(ans[1])
tf_norm = self.evaluate(norm)
self.assertAllEqual(tf_norm, float('inf'))
self.assertAllEqual(tf_ans_1, np.full([2, 3], float('nan')))
self.assertAllEqual(tf_ans_2, np.full([2], float('nan')))
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormClippedTensor(self):
# Norm clipping when average clip_norm < 0.83333333
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.session():
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormReplacedWithClipByNorm(self):
# Check clip_by_average_norm(t) is the same as
# clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
with self.session():
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
# expected answer [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
with_norm = clip_ops.clip_by_average_norm(x, clip_norm)
without_norm = clip_ops.clip_by_norm(
x, clip_norm * math_ops.cast(array_ops.size(x), dtypes.float32))
clip_by_average_norm_ans = self.evaluate(with_norm)
clip_by_norm_ans = self.evaluate(without_norm)
self.assertAllClose(clip_by_average_norm_ans, clip_by_norm_ans)
@test_util.run_deprecated_v1
def testClipByValueEmptyTensor(self):
# Test case for GitHub issue 19337
zero = array_ops.placeholder(dtype=dtypes.float32, shape=None)
x = clip_ops.clip_by_value(zero, zero, zero)
y = clip_ops.clip_by_value(zero, 1.0, 1.0)
z = clip_ops.clip_by_value(zero, zero, 1.0)
w = clip_ops.clip_by_value(zero, 1.0, zero)
with self.session() as sess:
sess.run([x, y, z, w], feed_dict={zero: np.zeros((7, 0))})
if __name__ == '__main__':
test.main()
| ClipTest |
python | realpython__materials | python-range/pi_digits.py | {
"start": 47,
"end": 182
} | class ____:
num_digits: int
def __index__(self):
return int("3141592653589793238462643383279"[: self.num_digits])
| PiDigits |
python | ray-project__ray | python/ray/llm/_internal/batch/processor/vllm_engine_proc.py | {
"start": 1686,
"end": 2026
} | class ____(BaseModelExtended):
bundles: List[BundleSchema] = Field(
default_factory=list, description="The bundles for the placement group."
)
strategy: Literal["PACK", "STRICT_PACK", "SPREAD", "STRICT_SPREAD"] = Field(
default="PACK", description="The strategy for the placement group."
)
| PlacementGroupSchema |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qt.py | {
"start": 37700,
"end": 41924
} | class ____(QtWidgets.QDialog):
def __init__(self, targetfig, parent):
super().__init__(parent)
self.setWindowIcon(QtGui.QIcon(
str(cbook._get_data_path("images/matplotlib.png"))))
self.setObjectName("SubplotTool")
self._spinboxes = {}
main_layout = QtWidgets.QHBoxLayout()
self.setLayout(main_layout)
for group, spinboxes, buttons in [
("Borders",
["top", "bottom", "left", "right"],
[("Export values", self._export_values)]),
("Spacings",
["hspace", "wspace"],
[("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)])]:
layout = QtWidgets.QVBoxLayout()
main_layout.addLayout(layout)
box = QtWidgets.QGroupBox(group)
layout.addWidget(box)
inner = QtWidgets.QFormLayout(box)
for name in spinboxes:
self._spinboxes[name] = spinbox = QtWidgets.QDoubleSpinBox()
spinbox.setRange(0, 1)
spinbox.setDecimals(3)
spinbox.setSingleStep(0.005)
spinbox.setKeyboardTracking(False)
spinbox.valueChanged.connect(self._on_value_changed)
inner.addRow(name, spinbox)
layout.addStretch(1)
for name, method in buttons:
button = QtWidgets.QPushButton(name)
# Don't trigger on <enter>, which is used to input values.
button.setAutoDefault(False)
button.clicked.connect(method)
layout.addWidget(button)
if name == "Close":
button.setFocus()
self._figure = targetfig
self._defaults = {}
self._export_values_dialog = None
self.update_from_current_subplotpars()
def update_from_current_subplotpars(self):
self._defaults = {spinbox: getattr(self._figure.subplotpars, name)
for name, spinbox in self._spinboxes.items()}
self._reset() # Set spinbox current values without triggering signals.
def _export_values(self):
# Explicitly round to 3 decimals (which is also the spinbox precision)
# to avoid numbers of the form 0.100...001.
self._export_values_dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
self._export_values_dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join(f"{attr}={spinbox.value():.3}"
for attr, spinbox in self._spinboxes.items()))
# Adjust the height of the text widget to fit the whole text, plus
# some padding.
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
self._export_values_dialog.show()
def _on_value_changed(self):
spinboxes = self._spinboxes
# Set all mins and maxes, so that this can also be used in _reset().
for lower, higher in [("bottom", "top"), ("left", "right")]:
spinboxes[higher].setMinimum(spinboxes[lower].value() + .001)
spinboxes[lower].setMaximum(spinboxes[higher].value() - .001)
self._figure.subplots_adjust(
**{attr: spinbox.value() for attr, spinbox in spinboxes.items()})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr, spinbox in self._spinboxes.items():
spinbox.blockSignals(True)
spinbox.setValue(getattr(self._figure.subplotpars, attr))
spinbox.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for spinbox, value in self._defaults.items():
spinbox.setRange(0, 1)
spinbox.blockSignals(True)
spinbox.setValue(value)
spinbox.blockSignals(False)
self._on_value_changed()
| SubplotToolQt |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB118.py | {
"start": 2918,
"end": 3233
} | class ____:
@pytest.mark.parametrize(
"slicer, expected",
[
(lambda x: x[-2:], "foo"),
(lambda x: x[-5:-3], "bar"),
],
)
def test_inlet_asset_alias_extra_slice(self, slicer, expected):
assert slice("whatever") == expected
| TheLambdasHereAreNotMethods |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1064794,
"end": 1089685
} | class ____(VegaLiteSchema):
"""
Scale schema wrapper.
Parameters
----------
align : dict, float, :class:`ExprRef`
The alignment of the steps within the scale range.
This value must lie in the range ``[0,1]``. A value of ``0.5`` indicates that the
steps should be centered within the range. A value of ``0`` or ``1`` may be used to
shift the bands to one side, say to position them adjacent to an axis.
**Default value:** ``0.5``
base : dict, float, :class:`ExprRef`
The logarithm base of the ``log`` scale (default ``10``).
bins : dict, Sequence[float], :class:`ScaleBins`, :class:`ScaleBinParams`
Bin boundaries can be provided to scales as either an explicit array of bin
boundaries or as a bin specification object. The legal values are:
* An `array <../types/#Array>`__ literal of bin boundary values. For example, ``[0,
5, 10, 15, 20]``. The array must include both starting and ending boundaries. The
previous example uses five values to indicate a total of four bin intervals:
[0-5), [5-10), [10-15), [15-20]. Array literals may include signal references as
elements.
* A `bin specification object
<https://vega.github.io/vega-lite/docs/scale.html#bins>`__ that indicates the bin
*step* size, and optionally the *start* and *stop* boundaries.
* An array of bin boundaries over the scale domain. If provided, axes and legends
will use the bin boundaries to inform the choice of tick marks and text labels.
clamp : bool, dict, :class:`ExprRef`
If ``true``, values that exceed the data domain are clamped to either the minimum or
maximum range value
**Default value:** derived from the `scale config
<https://vega.github.io/vega-lite/docs/config.html#scale-config>`__'s ``clamp``
(``true`` by default).
constant : dict, float, :class:`ExprRef`
A constant determining the slope of the symlog function around zero. Only used for
``symlog`` scales.
**Default value:** ``1``
domain : dict, :class:`ExprRef`, Literal['unaggregated'], :class:`DomainUnionWith`, :class:`ParameterExtent`, Sequence[str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, None]
Customized domain values in the form of constant values or dynamic values driven by
a parameter.
1) Constant ``domain`` for *quantitative* fields can take one of the following
forms:
* A two-element array with minimum and maximum values. To create a diverging scale,
this two-element array can be combined with the ``domainMid`` property.
* An array with more than two entries, for `Piecewise quantitative scales
<https://vega.github.io/vega-lite/docs/scale.html#piecewise>`__.
* A string value ``"unaggregated"``, if the input field is aggregated, to indicate
that the domain should include the raw data values prior to the aggregation.
2) Constant ``domain`` for *temporal* fields can be a two-element array with minimum
and maximum values, in the form of either timestamps or the `DateTime definition
objects <https://vega.github.io/vega-lite/docs/types.html#datetime>`__.
3) Constant ``domain`` for *ordinal* and *nominal* fields can be an array that lists
valid input values.
4) To combine (union) specified constant domain with the field's values, ``domain``
can be an object with a ``unionWith`` property that specify constant domain to be
combined. For example, ``domain: {unionWith: [0, 100]}`` for a quantitative scale
means that the scale domain always includes ``[0, 100]``, but will include other
values in the fields beyond ``[0, 100]``.
5) Domain can also takes an object defining a field or encoding of a parameter that
`interactively determines
<https://vega.github.io/vega-lite/docs/selection.html#scale-domains>`__ the scale
domain.
domainMax : dict, float, :class:`ExprRef`, :class:`DateTime`
Sets the maximum value in the scale domain, overriding the ``domain`` property. This
property is only intended for use with scales having continuous domains.
domainMid : dict, float, :class:`ExprRef`
Inserts a single mid-point value into a two-element domain. The mid-point value must
lie between the domain minimum and maximum values. This property can be useful for
setting a midpoint for `diverging color scales
<https://vega.github.io/vega-lite/docs/scale.html#piecewise>`__. The domainMid
property is only intended for use with scales supporting continuous, piecewise
domains.
domainMin : dict, float, :class:`ExprRef`, :class:`DateTime`
Sets the minimum value in the scale domain, overriding the domain property. This
property is only intended for use with scales having continuous domains.
domainRaw : dict, :class:`ExprRef`
An expression for an array of raw values that, if non-null, directly overrides the
*domain* property. This is useful for supporting interactions such as panning or
zooming a scale. The scale may be initially determined using a data-driven domain,
then modified in response to user input by setting the rawDomain value.
exponent : dict, float, :class:`ExprRef`
The exponent of the ``pow`` scale.
interpolate : dict, :class:`ExprRef`, :class:`ScaleInterpolateEnum`, :class:`ScaleInterpolateParams`, Literal['rgb', 'lab', 'hcl', 'hsl', 'hsl-long', 'hcl-long', 'cubehelix', 'cubehelix-long']
The interpolation method for range values. By default, a general interpolator for
numbers, dates, strings and colors (in HCL space) is used. For color ranges, this
property allows interpolation in alternative color spaces. Legal values include
``rgb``, ``hsl``, ``hsl-long``, ``lab``, ``hcl``, ``hcl-long``, ``cubehelix`` and
``cubehelix-long`` ('-long' variants use longer paths in polar coordinate spaces).
If object-valued, this property accepts an object with a string-valued *type*
property and an optional numeric *gamma* property applicable to rgb and cubehelix
interpolators. For more, see the `d3-interpolate documentation
<https://github.com/d3/d3-interpolate>`__.
* **Default value:** ``hcl``
nice : bool, dict, float, :class:`ExprRef`, :class:`TimeInterval`, :class:`TimeIntervalStep`, Literal['millisecond', 'second', 'minute', 'hour', 'day', 'week', 'month', 'year']
Extending the domain so that it starts and ends on nice round values. This method
typically modifies the scale's domain, and may only extend the bounds to the nearest
round value. Nicing is useful if the domain is computed from data and may be
irregular. For example, for a domain of *[0.201479…, 0.996679…]*, a nice domain
might be *[0.2, 1.0]*.
For quantitative scales such as linear, ``nice`` can be either a boolean flag or a
number. If ``nice`` is a number, it will represent a desired tick count. This allows
greater control over the step size used to extend the bounds, guaranteeing that the
returned ticks will exactly cover the domain.
For temporal fields with time and utc scales, the ``nice`` value can be a string
indicating the desired time interval. Legal values are ``"millisecond"``,
``"second"``, ``"minute"``, ``"hour"``, ``"day"``, ``"week"``, ``"month"``, and
``"year"``. Alternatively, ``time`` and ``utc`` scales can accept an object-valued
interval specifier of the form ``{"interval": "month", "step": 3}``, which includes
a desired number of interval steps. Here, the domain would snap to quarter (Jan,
Apr, Jul, Oct) boundaries.
**Default value:** ``true`` for unbinned *quantitative* fields without explicit
domain bounds; ``false`` otherwise.
padding : dict, float, :class:`ExprRef`
For *`continuous <https://vega.github.io/vega-lite/docs/scale.html#continuous>`__*
scales, expands the scale domain to accommodate the specified number of pixels on
each of the scale range. The scale range must represent pixels for this parameter to
function as intended. Padding adjustment is performed prior to all other
adjustments, including the effects of the ``zero``, ``nice``, ``domainMin``, and
``domainMax`` properties.
For *`band <https://vega.github.io/vega-lite/docs/scale.html#band>`__* scales,
shortcut for setting ``paddingInner`` and ``paddingOuter`` to the same value.
For *`point <https://vega.github.io/vega-lite/docs/scale.html#point>`__* scales,
alias for ``paddingOuter``.
**Default value:** For *continuous* scales, derived from the `scale config
<https://vega.github.io/vega-lite/docs/scale.html#config>`__'s
``continuousPadding``. For *band and point* scales, see ``paddingInner`` and
``paddingOuter``. By default, Vega-Lite sets padding such that *width/height =
number of unique values * step*.
paddingInner : dict, float, :class:`ExprRef`
The inner padding (spacing) within each band step of band scales, as a fraction of
the step size. This value must lie in the range [0,1].
For point scale, this property is invalid as point scales do not have internal band
widths (only step sizes between bands).
**Default value:** derived from the `scale config
<https://vega.github.io/vega-lite/docs/scale.html#config>`__'s ``bandPaddingInner``.
paddingOuter : dict, float, :class:`ExprRef`
The outer padding (spacing) at the ends of the range of band and point scales, as a
fraction of the step size. This value must lie in the range [0,1].
**Default value:** derived from the `scale config
<https://vega.github.io/vega-lite/docs/scale.html#config>`__'s ``bandPaddingOuter``
for band scales and ``pointPadding`` for point scales. By default, Vega-Lite sets
outer padding such that *width/height = number of unique values * step*.
range : dict, :class:`RangeEnum`, :class:`FieldRange`, Sequence[str, dict, float, Sequence[float], :class:`ExprRef`], Literal['width', 'height', 'symbol', 'category', 'ordinal', 'ramp', 'diverging', 'heatmap']
The range of the scale. One of:
* A string indicating a `pre-defined named scale range
<https://vega.github.io/vega-lite/docs/scale.html#range-config>`__ (e.g., example,
``"symbol"``, or ``"diverging"``).
* For `continuous scales
<https://vega.github.io/vega-lite/docs/scale.html#continuous>`__, two-element
array indicating minimum and maximum values, or an array with more than two
entries for specifying a `piecewise scale
<https://vega.github.io/vega-lite/docs/scale.html#piecewise>`__.
* For `discrete <https://vega.github.io/vega-lite/docs/scale.html#discrete>`__ and
`discretizing <https://vega.github.io/vega-lite/docs/scale.html#discretizing>`__
scales, an array of desired output values or an object with a ``field`` property
representing the range values. For example, if a field ``color`` contains CSS
color names, we can set ``range`` to ``{field: "color"}``.
**Notes:**
1) For color scales you can also specify a color `scheme
<https://vega.github.io/vega-lite/docs/scale.html#scheme>`__ instead of ``range``.
2) Any directly specified ``range`` for ``x`` and ``y`` channels will be ignored.
Range can be customized via the view's corresponding `size
<https://vega.github.io/vega-lite/docs/size.html>`__ (``width`` and ``height``).
rangeMax : str, dict, float, :class:`ExprRef`
Sets the maximum value in the scale range, overriding the ``range`` property or the
default range. This property is only intended for use with scales having continuous
ranges.
rangeMin : str, dict, float, :class:`ExprRef`
Sets the minimum value in the scale range, overriding the ``range`` property or the
default range. This property is only intended for use with scales having continuous
ranges.
reverse : bool, dict, :class:`ExprRef`
If true, reverses the order of the scale range. **Default value:** ``false``.
round : bool, dict, :class:`ExprRef`
If ``true``, rounds numeric output values to integers. This can be helpful for
snapping to the pixel grid.
**Default value:** ``false``.
scheme : dict, :class:`ExprRef`, :class:`Cyclical`, :class:`Diverging`, :class:`Categorical`, :class:`ColorScheme`, :class:`SchemeParams`, :class:`SequentialMultiHue`, :class:`SequentialSingleHue`, Literal['accent', 'category10', 'category20', 'category20b', 'category20c', 'dark2', 'paired', 'pastel1', 'pastel2', 'set1', 'set2', 'set3', 'tableau10', 'tableau20', 'observable10', 'blueorange', 'blueorange-3', 'blueorange-4', 'blueorange-5', 'blueorange-6', 'blueorange-7', 'blueorange-8', 'blueorange-9', 'blueorange-10', 'blueorange-11', 'brownbluegreen', 'brownbluegreen-3', 'brownbluegreen-4', 'brownbluegreen-5', 'brownbluegreen-6', 'brownbluegreen-7', 'brownbluegreen-8', 'brownbluegreen-9', 'brownbluegreen-10', 'brownbluegreen-11', 'purplegreen', 'purplegreen-3', 'purplegreen-4', 'purplegreen-5', 'purplegreen-6', 'purplegreen-7', 'purplegreen-8', 'purplegreen-9', 'purplegreen-10', 'purplegreen-11', 'pinkyellowgreen', 'pinkyellowgreen-3', 'pinkyellowgreen-4', 'pinkyellowgreen-5', 'pinkyellowgreen-6', 'pinkyellowgreen-7', 'pinkyellowgreen-8', 'pinkyellowgreen-9', 'pinkyellowgreen-10', 'pinkyellowgreen-11', 'purpleorange', 'purpleorange-3', 'purpleorange-4', 'purpleorange-5', 'purpleorange-6', 'purpleorange-7', 'purpleorange-8', 'purpleorange-9', 'purpleorange-10', 'purpleorange-11', 'redblue', 'redblue-3', 'redblue-4', 'redblue-5', 'redblue-6', 'redblue-7', 'redblue-8', 'redblue-9', 'redblue-10', 'redblue-11', 'redgrey', 'redgrey-3', 'redgrey-4', 'redgrey-5', 'redgrey-6', 'redgrey-7', 'redgrey-8', 'redgrey-9', 'redgrey-10', 'redgrey-11', 'redyellowblue', 'redyellowblue-3', 'redyellowblue-4', 'redyellowblue-5', 'redyellowblue-6', 'redyellowblue-7', 'redyellowblue-8', 'redyellowblue-9', 'redyellowblue-10', 'redyellowblue-11', 'redyellowgreen', 'redyellowgreen-3', 'redyellowgreen-4', 'redyellowgreen-5', 'redyellowgreen-6', 'redyellowgreen-7', 'redyellowgreen-8', 'redyellowgreen-9', 'redyellowgreen-10', 'redyellowgreen-11', 'spectral', 'spectral-3', 'spectral-4', 
'spectral-5', 'spectral-6', 'spectral-7', 'spectral-8', 'spectral-9', 'spectral-10', 'spectral-11', 'blues', 'tealblues', 'teals', 'greens', 'browns', 'greys', 'purples', 'warmgreys', 'reds', 'oranges', 'rainbow', 'sinebow', 'turbo', 'viridis', 'inferno', 'magma', 'plasma', 'cividis', 'bluegreen', 'bluegreen-3', 'bluegreen-4', 'bluegreen-5', 'bluegreen-6', 'bluegreen-7', 'bluegreen-8', 'bluegreen-9', 'bluepurple', 'bluepurple-3', 'bluepurple-4', 'bluepurple-5', 'bluepurple-6', 'bluepurple-7', 'bluepurple-8', 'bluepurple-9', 'goldgreen', 'goldgreen-3', 'goldgreen-4', 'goldgreen-5', 'goldgreen-6', 'goldgreen-7', 'goldgreen-8', 'goldgreen-9', 'goldorange', 'goldorange-3', 'goldorange-4', 'goldorange-5', 'goldorange-6', 'goldorange-7', 'goldorange-8', 'goldorange-9', 'goldred', 'goldred-3', 'goldred-4', 'goldred-5', 'goldred-6', 'goldred-7', 'goldred-8', 'goldred-9', 'greenblue', 'greenblue-3', 'greenblue-4', 'greenblue-5', 'greenblue-6', 'greenblue-7', 'greenblue-8', 'greenblue-9', 'orangered', 'orangered-3', 'orangered-4', 'orangered-5', 'orangered-6', 'orangered-7', 'orangered-8', 'orangered-9', 'purplebluegreen', 'purplebluegreen-3', 'purplebluegreen-4', 'purplebluegreen-5', 'purplebluegreen-6', 'purplebluegreen-7', 'purplebluegreen-8', 'purplebluegreen-9', 'purpleblue', 'purpleblue-3', 'purpleblue-4', 'purpleblue-5', 'purpleblue-6', 'purpleblue-7', 'purpleblue-8', 'purpleblue-9', 'purplered', 'purplered-3', 'purplered-4', 'purplered-5', 'purplered-6', 'purplered-7', 'purplered-8', 'purplered-9', 'redpurple', 'redpurple-3', 'redpurple-4', 'redpurple-5', 'redpurple-6', 'redpurple-7', 'redpurple-8', 'redpurple-9', 'yellowgreenblue', 'yellowgreenblue-3', 'yellowgreenblue-4', 'yellowgreenblue-5', 'yellowgreenblue-6', 'yellowgreenblue-7', 'yellowgreenblue-8', 'yellowgreenblue-9', 'yellowgreen', 'yellowgreen-3', 'yellowgreen-4', 'yellowgreen-5', 'yellowgreen-6', 'yellowgreen-7', 'yellowgreen-8', 'yellowgreen-9', 'yelloworangebrown', 'yelloworangebrown-3', 
'yelloworangebrown-4', 'yelloworangebrown-5', 'yelloworangebrown-6', 'yelloworangebrown-7', 'yelloworangebrown-8', 'yelloworangebrown-9', 'yelloworangered', 'yelloworangered-3', 'yelloworangered-4', 'yelloworangered-5', 'yelloworangered-6', 'yelloworangered-7', 'yelloworangered-8', 'yelloworangered-9', 'darkblue', 'darkblue-3', 'darkblue-4', 'darkblue-5', 'darkblue-6', 'darkblue-7', 'darkblue-8', 'darkblue-9', 'darkgold', 'darkgold-3', 'darkgold-4', 'darkgold-5', 'darkgold-6', 'darkgold-7', 'darkgold-8', 'darkgold-9', 'darkgreen', 'darkgreen-3', 'darkgreen-4', 'darkgreen-5', 'darkgreen-6', 'darkgreen-7', 'darkgreen-8', 'darkgreen-9', 'darkmulti', 'darkmulti-3', 'darkmulti-4', 'darkmulti-5', 'darkmulti-6', 'darkmulti-7', 'darkmulti-8', 'darkmulti-9', 'darkred', 'darkred-3', 'darkred-4', 'darkred-5', 'darkred-6', 'darkred-7', 'darkred-8', 'darkred-9', 'lightgreyred', 'lightgreyred-3', 'lightgreyred-4', 'lightgreyred-5', 'lightgreyred-6', 'lightgreyred-7', 'lightgreyred-8', 'lightgreyred-9', 'lightgreyteal', 'lightgreyteal-3', 'lightgreyteal-4', 'lightgreyteal-5', 'lightgreyteal-6', 'lightgreyteal-7', 'lightgreyteal-8', 'lightgreyteal-9', 'lightmulti', 'lightmulti-3', 'lightmulti-4', 'lightmulti-5', 'lightmulti-6', 'lightmulti-7', 'lightmulti-8', 'lightmulti-9', 'lightorange', 'lightorange-3', 'lightorange-4', 'lightorange-5', 'lightorange-6', 'lightorange-7', 'lightorange-8', 'lightorange-9', 'lighttealblue', 'lighttealblue-3', 'lighttealblue-4', 'lighttealblue-5', 'lighttealblue-6', 'lighttealblue-7', 'lighttealblue-8', 'lighttealblue-9']
A string indicating a color `scheme
<https://vega.github.io/vega-lite/docs/scale.html#scheme>`__ name (e.g.,
``"category10"`` or ``"blues"``) or a `scheme parameter object
<https://vega.github.io/vega-lite/docs/scale.html#scheme-params>`__.
Discrete color schemes may be used with `discrete
<https://vega.github.io/vega-lite/docs/scale.html#discrete>`__ or `discretizing
<https://vega.github.io/vega-lite/docs/scale.html#discretizing>`__ scales.
Continuous color schemes are intended for use with color scales.
To set a custom scheme, instead set the list of values `as the scale range
<https://vega.github.io/vega-lite/docs/scale.html#2-setting-the-range-property-to-an-array-of-valid-css-color-strings>`__.
For the full list of supported schemes, please refer to the `Vega Scheme
<https://vega.github.io/vega/docs/schemes/#reference>`__ reference.
type : :class:`ScaleType`, Literal['linear', 'log', 'pow', 'sqrt', 'symlog', 'identity', 'sequential', 'time', 'utc', 'quantile', 'quantize', 'threshold', 'bin-ordinal', 'ordinal', 'point', 'band']
The type of scale. Vega-Lite supports the following categories of scale types:
1) `Continuous Scales
<https://vega.github.io/vega-lite/docs/scale.html#continuous>`__ -- mapping
continuous domains to continuous output ranges (`"linear"
<https://vega.github.io/vega-lite/docs/scale.html#linear>`__, `"pow"
<https://vega.github.io/vega-lite/docs/scale.html#pow>`__, `"sqrt"
<https://vega.github.io/vega-lite/docs/scale.html#sqrt>`__, `"symlog"
<https://vega.github.io/vega-lite/docs/scale.html#symlog>`__, `"log"
<https://vega.github.io/vega-lite/docs/scale.html#log>`__, `"time"
<https://vega.github.io/vega-lite/docs/scale.html#time>`__, `"utc"
<https://vega.github.io/vega-lite/docs/scale.html#utc>`__.
2) `Discrete Scales <https://vega.github.io/vega-lite/docs/scale.html#discrete>`__
-- mapping discrete domains to discrete (`"ordinal"
<https://vega.github.io/vega-lite/docs/scale.html#ordinal>`__) or continuous
(`"band" <https://vega.github.io/vega-lite/docs/scale.html#band>`__ and `"point"
<https://vega.github.io/vega-lite/docs/scale.html#point>`__) output ranges.
3) `Discretizing Scales
<https://vega.github.io/vega-lite/docs/scale.html#discretizing>`__ -- mapping
continuous domains to discrete output ranges `"bin-ordinal"
<https://vega.github.io/vega-lite/docs/scale.html#bin-ordinal>`__, `"quantile"
<https://vega.github.io/vega-lite/docs/scale.html#quantile>`__, `"quantize"
<https://vega.github.io/vega-lite/docs/scale.html#quantize>`__ and `"threshold"
<https://vega.github.io/vega-lite/docs/scale.html#threshold>`__.
**Default value:** please see the `scale type table
<https://vega.github.io/vega-lite/docs/scale.html#type>`__.
zero : bool, dict, :class:`ExprRef`
If ``true``, ensures that a zero baseline value is included in the scale domain.
**Default value:** ``true`` for x and y channels if the quantitative field is not
binned and no custom ``domain`` is provided; ``false`` otherwise.
**Note:** Log, time, and utc scales do not support ``zero``.
"""
_schema = {"$ref": "#/definitions/Scale"}
def __init__(
self,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
align=align,
base=base,
bins=bins,
clamp=clamp,
constant=constant,
domain=domain,
domainMax=domainMax,
domainMid=domainMid,
domainMin=domainMin,
domainRaw=domainRaw,
exponent=exponent,
interpolate=interpolate,
nice=nice,
padding=padding,
paddingInner=paddingInner,
paddingOuter=paddingOuter,
range=range,
rangeMax=rangeMax,
rangeMin=rangeMin,
reverse=reverse,
round=round,
scheme=scheme,
type=type,
zero=zero,
**kwds,
)
| Scale |
python | pypa__pip | src/pip/_internal/cli/spinners.py | {
"start": 3487,
"end": 4809
} | class ____:
def __init__(self, min_update_interval_seconds: float) -> None:
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update: float = 0
def ready(self) -> bool:
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self) -> None:
self._last_update = time.time()
@contextlib.contextmanager
def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner: SpinnerInterface = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| RateLimiter |
python | huggingface__transformers | tests/models/instructblip/test_modeling_instructblip.py | {
"start": 1673,
"end": 4800
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=1e-10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in case of a vision transformer, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return InstructBlipVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = InstructBlipVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| InstructBlipVisionModelTester |
python | plotly__plotly.py | plotly/graph_objs/_parcats.py | {
"start": 215,
"end": 35722
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "parcats"
_valid_props = {
"arrangement",
"bundlecolors",
"counts",
"countssrc",
"dimensiondefaults",
"dimensions",
"domain",
"hoverinfo",
"hoveron",
"hovertemplate",
"hovertemplatefallback",
"labelfont",
"legendgrouptitle",
"legendwidth",
"line",
"meta",
"metasrc",
"name",
"sortpaths",
"stream",
"tickfont",
"type",
"uid",
"uirevision",
"visible",
}
@property
def arrangement(self):
"""
Sets the drag interaction mode for categories and dimensions.
If `perpendicular`, the categories can only move along a line
perpendicular to the paths. If `freeform`, the categories can
freely move on the plane. If `fixed`, the categories and
dimensions are stationary.
The 'arrangement' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perpendicular', 'freeform', 'fixed']
Returns
-------
Any
"""
return self["arrangement"]
@arrangement.setter
def arrangement(self, val):
self["arrangement"] = val
@property
def bundlecolors(self):
"""
Sort paths so that like colors are bundled together within each
category.
The 'bundlecolors' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["bundlecolors"]
@bundlecolors.setter
def bundlecolors(self, val):
self["bundlecolors"] = val
@property
def counts(self):
"""
The number of observations represented by each state. Defaults
to 1 so that each state represents one observation
The 'counts' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["counts"]
@counts.setter
def counts(self, val):
self["counts"] = val
@property
def countssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `counts`.
The 'countssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["countssrc"]
@countssrc.setter
def countssrc(self, val):
self["countssrc"] = val
@property
def dimensions(self):
"""
The dimensions (variables) of the parallel categories diagram.
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcats.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Returns
-------
tuple[plotly.graph_objs.parcats.Dimension]
"""
return self["dimensions"]
@dimensions.setter
def dimensions(self, val):
self["dimensions"] = val
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.parcats.dimensiondefaults), sets the
default property values to use for elements of
parcats.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Dimension`
- A dict of string/value properties that will be passed
to the Dimension constructor
Returns
-------
plotly.graph_objs.parcats.Dimension
"""
return self["dimensiondefaults"]
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self["dimensiondefaults"] = val
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Returns
-------
plotly.graph_objs.parcats.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['count', 'probability'] joined with '+' characters
(e.g. 'count+probability')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
Returns
-------
Any
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoveron(self):
"""
Sets the hover interaction mode for the parcats diagram. If
`category`, hover interaction take place per category. If
`color`, hover interactions take place per color per category.
If `dimension`, hover interactions take place across all
categories per dimension.
The 'hoveron' property is an enumeration that may be specified as:
- One of the following enumeration values:
['category', 'color', 'dimension']
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables `count`,
`probability`, `category`, `categorycount`, `colorcount` and
`bandcolorcount`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def labelfont(self):
"""
Sets the font for the `dimension` labels.
The 'labelfont' property is an instance of Labelfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Labelfont`
- A dict of string/value properties that will be passed
to the Labelfont constructor
Returns
-------
plotly.graph_objs.parcats.Labelfont
"""
return self["labelfont"]
@labelfont.setter
def labelfont(self, val):
self["labelfont"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.parcats.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.parcats.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def sortpaths(self):
"""
Sets the path sorting algorithm. If `forward`, sort paths based
on dimension categories from left to right. If `backward`, sort
paths based on dimensions categories from right to left.
The 'sortpaths' property is an enumeration that may be specified as:
- One of the following enumeration values:
['forward', 'backward']
Returns
-------
Any
"""
return self["sortpaths"]
@sortpaths.setter
def sortpaths(self, val):
self["sortpaths"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.parcats.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def tickfont(self):
"""
Sets the font for the `category` labels.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcats.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.parcats.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
arrangement
Sets the drag interaction mode for categories and
dimensions. If `perpendicular`, the categories can only
move along a line perpendicular to the paths. If
`freeform`, the categories can freely move on the
plane. If `fixed`, the categories and dimensions are
stationary.
bundlecolors
Sort paths so that like colors are bundled together
within each category.
counts
The number of observations represented by each state.
Defaults to 1 so that each state represents one
observation
countssrc
Sets the source reference on Chart Studio Cloud for
`counts`.
dimensions
The dimensions (variables) of the parallel categories
diagram.
dimensiondefaults
When used in a template (as
layout.template.data.parcats.dimensiondefaults), sets
the default property values to use for elements of
parcats.dimensions
domain
:class:`plotly.graph_objects.parcats.Domain` instance
or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoveron
Sets the hover interaction mode for the parcats
diagram. If `category`, hover interaction take place
per category. If `color`, hover interactions take place
per color per category. If `dimension`, hover
interactions take place across all categories per
dimension.
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `count`, `probability`, `category`,
`categorycount`, `colorcount` and `bandcolorcount`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
labelfont
Sets the font for the `dimension` labels.
legendgrouptitle
:class:`plotly.graph_objects.parcats.Legendgrouptitle`
instance or dict with compatible properties
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.parcats.Line` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
sortpaths
Sets the path sorting algorithm. If `forward`, sort
paths based on dimension categories from left to right.
If `backward`, sort paths based on dimensions
categories from right to left.
stream
:class:`plotly.graph_objects.parcats.Stream` instance
or dict with compatible properties
tickfont
Sets the font for the `category` labels.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
arrangement=None,
bundlecolors=None,
counts=None,
countssrc=None,
dimensions=None,
dimensiondefaults=None,
domain=None,
hoverinfo=None,
hoveron=None,
hovertemplate=None,
hovertemplatefallback=None,
labelfont=None,
legendgrouptitle=None,
legendwidth=None,
line=None,
meta=None,
metasrc=None,
name=None,
sortpaths=None,
stream=None,
tickfont=None,
uid=None,
uirevision=None,
visible=None,
**kwargs,
):
"""
Construct a new Parcats object
Parallel categories diagram for multidimensional categorical
data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Parcats`
arrangement
Sets the drag interaction mode for categories and
dimensions. If `perpendicular`, the categories can only
move along a line perpendicular to the paths. If
`freeform`, the categories can freely move on the
plane. If `fixed`, the categories and dimensions are
stationary.
bundlecolors
Sort paths so that like colors are bundled together
within each category.
counts
The number of observations represented by each state.
Defaults to 1 so that each state represents one
observation
countssrc
Sets the source reference on Chart Studio Cloud for
`counts`.
dimensions
The dimensions (variables) of the parallel categories
diagram.
dimensiondefaults
When used in a template (as
layout.template.data.parcats.dimensiondefaults), sets
the default property values to use for elements of
parcats.dimensions
domain
:class:`plotly.graph_objects.parcats.Domain` instance
or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoveron
Sets the hover interaction mode for the parcats
diagram. If `category`, hover interaction take place
per category. If `color`, hover interactions take place
per color per category. If `dimension`, hover
interactions take place across all categories per
dimension.
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `count`, `probability`, `category`,
`categorycount`, `colorcount` and `bandcolorcount`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
labelfont
Sets the font for the `dimension` labels.
legendgrouptitle
:class:`plotly.graph_objects.parcats.Legendgrouptitle`
instance or dict with compatible properties
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.parcats.Line` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
sortpaths
Sets the path sorting algorithm. If `forward`, sort
paths based on dimension categories from left to right.
If `backward`, sort paths based on dimensions
categories from right to left.
stream
:class:`plotly.graph_objects.parcats.Stream` instance
or dict with compatible properties
tickfont
Sets the font for the `category` labels.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Parcats
"""
super().__init__("parcats")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Parcats
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Parcats`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("arrangement", arg, arrangement)
self._set_property("bundlecolors", arg, bundlecolors)
self._set_property("counts", arg, counts)
self._set_property("countssrc", arg, countssrc)
self._set_property("dimensions", arg, dimensions)
self._set_property("dimensiondefaults", arg, dimensiondefaults)
self._set_property("domain", arg, domain)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoveron", arg, hoveron)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("labelfont", arg, labelfont)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("sortpaths", arg, sortpaths)
self._set_property("stream", arg, stream)
self._set_property("tickfont", arg, tickfont)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._props["type"] = "parcats"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Parcats |
python | run-llama__llama_index | llama-index-core/llama_index/core/voice_agents/interface.py | {
"start": 61,
"end": 2761
} | class ____(ABC):
"""
Abstract base class for a voice agent audio input/output interface.
"""
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Please implement this method by initializing the class with arbitrary attributes."""
...
@abstractmethod
def _speaker_callback(self, *args: Any, **kwargs: Any) -> Any:
"""
Callback function for the audio output device.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
...
@abstractmethod
def _microphone_callback(self, *args: Any, **kwargs: Any) -> Any:
"""
Callback function for the audio input device.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
...
@abstractmethod
def start(self, *args: Any, **kwargs: Any) -> None:
"""
Start the interface.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
def stop(self) -> None:
"""
Stop the interface.
Args:
None
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
def interrupt(self) -> None:
"""
Interrupt the interface.
Args:
None
Returns:
out (None): This function does not return anything.
"""
...
@abstractmethod
def output(self, *args: Any, **kwargs: Any) -> Any:
"""
Process and output the audio.
Args:
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
...
@abstractmethod
def receive(self, data: Any, *args: Any, **kwargs: Any) -> Any:
"""
Receive audio data.
Args:
data (Any): received audio data (generally as bytes or str, but it is kept open also to other types).
*args: Can take any positional argument.
**kwargs: Can take any keyword argument.
Returns:
out (Any): This function can return any output.
"""
...
| BaseVoiceAgentInterface |
python | sqlalchemy__sqlalchemy | test/sql/test_syntax_extensions.py | {
"start": 4439,
"end": 8638
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_illegal_section(self):
class SomeExtension(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_select(self, select_stmt):
select_stmt.apply_syntax_extension_point(
lambda existing: [self],
"not_present",
)
with expect_raises_message(
ValueError,
r"Unknown position 'not_present' for <class .*Select'> "
"construct; known positions: "
"'post_select', 'pre_columns', 'post_criteria', 'post_body'",
):
select(column("q")).ext(SomeExtension())
def test_select_post_select_clause(self):
self.assert_compile(
select(column("a"), column("b"))
.ext(PostSelectClause())
.where(column("q") == 5),
"SELECT POST SELECT KEYWORD a, b WHERE q = :q_1",
)
def test_select_pre_columns_clause(self):
self.assert_compile(
select(column("a"), column("b"))
.ext(PreColumnsClause())
.where(column("q") == 5)
.distinct(),
"SELECT DISTINCT PRE COLUMNS a, b WHERE q = :q_1",
)
def test_select_post_criteria_clause(self):
self.assert_compile(
select(column("a"), column("b"))
.ext(PostCriteriaClause())
.where(column("q") == 5)
.having(column("z") == 10)
.order_by(column("r")),
"SELECT a, b WHERE q = :q_1 HAVING z = :z_1 "
"POST CRITERIA ORDER BY r",
)
def test_select_post_criteria_clause_multiple(self):
self.assert_compile(
select(column("a"), column("b"))
.ext(PostCriteriaClause())
.ext(PostCriteriaClause2())
.where(column("q") == 5)
.having(column("z") == 10)
.order_by(column("r")),
"SELECT a, b WHERE q = :q_1 HAVING z = :z_1 "
"POST CRITERIA 2 POST CRITERIA 2 ORDER BY r",
)
def test_select_post_criteria_clause_multiple2(self):
stmt = (
select(column("a"), column("b"))
.ext(PostCriteriaClause())
.ext(PostCriteriaClause())
.ext(PostCriteriaClause2())
.ext(PostCriteriaClause2())
.where(column("q") == 5)
.having(column("z") == 10)
.order_by(column("r"))
)
# PostCriteriaClause2 is here only once
self.assert_compile(
stmt,
"SELECT a, b WHERE q = :q_1 HAVING z = :z_1 "
"POST CRITERIA POST CRITERIA 2 POST CRITERIA 2 ORDER BY r",
)
# now there is only PostCriteriaClause3
self.assert_compile(
stmt.ext(PostCriteriaClause3()),
"SELECT a, b WHERE q = :q_1 HAVING z = :z_1 "
"3 POST CRITERIA 3 ORDER BY r",
)
def test_select_post_select_body(self):
self.assert_compile(
select(column("a"), column("b"))
.ext(PostBodyClause())
.where(column("q") == 5)
.having(column("z") == 10)
.order_by(column("r"))
.limit(15),
"SELECT a, b WHERE q = :q_1 HAVING z = :z_1 "
"ORDER BY r LIMIT :param_1 POST SELECT BODY",
)
def test_insert_post_values(self):
t = table("t", column("a"), column("b"))
self.assert_compile(
t.insert().ext(PostValuesClause()),
"INSERT INTO t (a, b) VALUES (:a, :b) POST VALUES",
)
def test_update_post_criteria(self):
t = table("t", column("a"), column("b"))
self.assert_compile(
t.update().ext(PostCriteriaClause()).where(t.c.a == "hi"),
"UPDATE t SET a=:a, b=:b WHERE t.a = :a_1 POST CRITERIA",
)
def test_delete_post_criteria(self):
t = table("t", column("a"), column("b"))
self.assert_compile(
t.delete().ext(PostCriteriaClause()).where(t.c.a == "hi"),
"DELETE FROM t WHERE t.a = :a_1 POST CRITERIA",
)
| TestExtensionPoints |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 9683,
"end": 9763
} | class ____(HTTPClientError):
status_code = 431
| HTTPRequestHeaderFieldsTooLarge |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 24908,
"end": 26751
} | class ____(SimpleTestCase):
def test_valid_default_case(self):
class Model(models.Model):
field = models.FileField()
self.assertEqual(Model._meta.get_field("field").check(), [])
def test_valid_case(self):
class Model(models.Model):
field = models.FileField(upload_to="somewhere")
field = Model._meta.get_field("field")
self.assertEqual(field.check(), [])
def test_primary_key(self):
class Model(models.Model):
field = models.FileField(primary_key=False, upload_to="somewhere")
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"'primary_key' is not a valid argument for a FileField.",
obj=field,
id="fields.E201",
)
],
)
def test_upload_to_starts_with_slash(self):
class Model(models.Model):
field = models.FileField(upload_to="/somewhere")
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"FileField's 'upload_to' argument must be a relative path, not "
"an absolute path.",
obj=field,
id="fields.E202",
hint="Remove the leading slash.",
)
],
)
def test_upload_to_callable_not_checked(self):
def callable(instance, filename):
return "/" + filename
class Model(models.Model):
field = models.FileField(upload_to=callable)
field = Model._meta.get_field("field")
self.assertEqual(field.check(), [])
@isolate_apps("invalid_models_tests")
| FileFieldTests |
python | realpython__materials | python-microservices-with-grpc/marketplace/recommendations_pb2_grpc.py | {
"start": 150,
"end": 656
} | class ____(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Recommend = channel.unary_unary(
"/Recommendations/Recommend",
request_serializer=recommendations__pb2.RecommendationRequest.SerializeToString,
response_deserializer=recommendations__pb2.RecommendationResponse.FromString,
)
| RecommendationsStub |
python | django__django | django/utils/tree.py | {
"start": 155,
"end": 4394
} | class ____:
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = "DEFAULT"
def __init__(self, children=None, connector=None, negated=False):
"""Construct a new Node. If no connector is given, use the default."""
self.children = children[:] if children else []
self.connector = connector or self.default
self.negated = negated
@classmethod
def create(cls, children=None, connector=None, negated=False):
"""
Create a new instance using Node() instead of __init__() as some
subclasses, e.g. django.db.models.query_utils.Q, may implement a custom
__init__() with a signature that conflicts with the one defined in
Node.__init__().
"""
obj = Node(children, connector or cls.default, negated)
obj.__class__ = cls
return obj
def __str__(self):
template = "(NOT (%s: %s))" if self.negated else "(%s: %s)"
return template % (self.connector, ", ".join(str(c) for c in self.children))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def __copy__(self):
obj = self.create(connector=self.connector, negated=self.negated)
obj.children = self.children # Don't [:] as .__init__() via .create() does.
return obj
copy = __copy__
def __deepcopy__(self, memodict):
obj = self.create(connector=self.connector, negated=self.negated)
obj.children = copy.deepcopy(self.children, memodict)
return obj
def __len__(self):
"""Return the number of children this node has."""
return len(self.children)
def __bool__(self):
"""Return whether or not this node has children."""
return bool(self.children)
def __contains__(self, other):
"""Return True if 'other' is a direct child of this instance."""
return other in self.children
def __eq__(self, other):
return (
self.__class__ == other.__class__
and self.connector == other.connector
and self.negated == other.negated
and self.children == other.children
)
def __hash__(self):
return hash(
(
self.__class__,
self.connector,
self.negated,
*make_hashable(self.children),
)
)
def add(self, data, conn_type):
"""
Combine this tree and the data represented by data using the
connector conn_type. The combine is done by squashing the node other
away if possible.
This tree (self) will never be pushed to a child node of the
combined tree, nor will the connector or negated properties change.
Return a node which can be used in place of data regardless if the
node other got squashed or not.
"""
if self.connector != conn_type:
obj = self.copy()
self.connector = conn_type
self.children = [obj, data]
return data
elif (
isinstance(data, Node)
and not data.negated
and (data.connector == conn_type or len(data) == 1)
):
# We can squash the other node's children directly into this node.
# We are just doing (AB)(CD) == (ABCD) here, with the addition that
# if the length of the other node is 1 the connector doesn't
# matter. However, for the len(self) == 1 case we don't want to do
# the squashing, as it would alter self.connector.
self.children.extend(data.children)
return self
else:
# We could use perhaps additional logic here to see if some
# children could be used for pushdown here.
self.children.append(data)
return data
def negate(self):
"""Negate the sense of the root connector."""
self.negated = not self.negated
| Node |
python | pytorch__pytorch | test/torch_np/test_unary_ufuncs.py | {
"start": 293,
"end": 5092
} | class ____(TestCase):
def test_absolute(self):
assert_allclose(np.absolute(0.5), absolute(0.5), atol=1e-14, check_dtype=False)
def test_arccos(self):
assert_allclose(np.arccos(0.5), arccos(0.5), atol=1e-14, check_dtype=False)
def test_arccosh(self):
assert_allclose(np.arccosh(1.5), arccosh(1.5), atol=1e-14, check_dtype=False)
def test_arcsin(self):
assert_allclose(np.arcsin(0.5), arcsin(0.5), atol=1e-14, check_dtype=False)
def test_arcsinh(self):
assert_allclose(np.arcsinh(0.5), arcsinh(0.5), atol=1e-14, check_dtype=False)
def test_arctan(self):
assert_allclose(np.arctan(0.5), arctan(0.5), atol=1e-14, check_dtype=False)
def test_arctanh(self):
assert_allclose(np.arctanh(0.5), arctanh(0.5), atol=1e-14, check_dtype=False)
def test_cbrt(self):
assert_allclose(np.cbrt(0.5), cbrt(0.5), atol=1e-14, check_dtype=False)
def test_ceil(self):
assert_allclose(np.ceil(0.5), ceil(0.5), atol=1e-14, check_dtype=False)
def test_conjugate(self):
assert_allclose(
np.conjugate(0.5), conjugate(0.5), atol=1e-14, check_dtype=False
)
def test_cos(self):
assert_allclose(np.cos(0.5), cos(0.5), atol=1e-14, check_dtype=False)
def test_cosh(self):
assert_allclose(np.cosh(0.5), cosh(0.5), atol=1e-14, check_dtype=False)
def test_deg2rad(self):
assert_allclose(np.deg2rad(0.5), deg2rad(0.5), atol=1e-14, check_dtype=False)
def test_degrees(self):
assert_allclose(np.degrees(0.5), degrees(0.5), atol=1e-14, check_dtype=False)
def test_exp(self):
assert_allclose(np.exp(0.5), exp(0.5), atol=1e-14, check_dtype=False)
def test_exp2(self):
assert_allclose(np.exp2(0.5), exp2(0.5), atol=1e-14, check_dtype=False)
def test_expm1(self):
assert_allclose(np.expm1(0.5), expm1(0.5), atol=1e-14, check_dtype=False)
def test_fabs(self):
assert_allclose(np.fabs(0.5), fabs(0.5), atol=1e-14, check_dtype=False)
def test_floor(self):
assert_allclose(np.floor(0.5), floor(0.5), atol=1e-14, check_dtype=False)
def test_isfinite(self):
assert_allclose(np.isfinite(0.5), isfinite(0.5), atol=1e-14, check_dtype=False)
def test_isinf(self):
assert_allclose(np.isinf(0.5), isinf(0.5), atol=1e-14, check_dtype=False)
def test_isnan(self):
assert_allclose(np.isnan(0.5), isnan(0.5), atol=1e-14, check_dtype=False)
def test_log(self):
assert_allclose(np.log(0.5), log(0.5), atol=1e-14, check_dtype=False)
def test_log10(self):
assert_allclose(np.log10(0.5), log10(0.5), atol=1e-14, check_dtype=False)
def test_log1p(self):
assert_allclose(np.log1p(0.5), log1p(0.5), atol=1e-14, check_dtype=False)
def test_log2(self):
assert_allclose(np.log2(0.5), log2(0.5), atol=1e-14, check_dtype=False)
def test_logical_not(self):
assert_allclose(
np.logical_not(0.5), logical_not(0.5), atol=1e-14, check_dtype=False
)
def test_negative(self):
assert_allclose(np.negative(0.5), negative(0.5), atol=1e-14, check_dtype=False)
def test_positive(self):
assert_allclose(np.positive(0.5), positive(0.5), atol=1e-14, check_dtype=False)
def test_rad2deg(self):
assert_allclose(np.rad2deg(0.5), rad2deg(0.5), atol=1e-14, check_dtype=False)
def test_radians(self):
assert_allclose(np.radians(0.5), radians(0.5), atol=1e-14, check_dtype=False)
def test_reciprocal(self):
assert_allclose(
np.reciprocal(0.5), reciprocal(0.5), atol=1e-14, check_dtype=False
)
def test_rint(self):
assert_allclose(np.rint(0.5), rint(0.5), atol=1e-14, check_dtype=False)
def test_sign(self):
assert_allclose(np.sign(0.5), sign(0.5), atol=1e-14, check_dtype=False)
def test_signbit(self):
assert_allclose(np.signbit(0.5), signbit(0.5), atol=1e-14, check_dtype=False)
def test_sin(self):
assert_allclose(np.sin(0.5), sin(0.5), atol=1e-14, check_dtype=False)
def test_sinh(self):
assert_allclose(np.sinh(0.5), sinh(0.5), atol=1e-14, check_dtype=False)
def test_sqrt(self):
assert_allclose(np.sqrt(0.5), sqrt(0.5), atol=1e-14, check_dtype=False)
def test_square(self):
assert_allclose(np.square(0.5), square(0.5), atol=1e-14, check_dtype=False)
def test_tan(self):
assert_allclose(np.tan(0.5), tan(0.5), atol=1e-14, check_dtype=False)
def test_tanh(self):
assert_allclose(np.tanh(0.5), tanh(0.5), atol=1e-14, check_dtype=False)
def test_trunc(self):
assert_allclose(np.trunc(0.5), trunc(0.5), atol=1e-14, check_dtype=False)
if __name__ == "__main__":
run_tests()
| TestUnaryUfuncs |
python | pytorch__pytorch | torch/_inductor/codegen/rocm/ck_tile_universal_gemm_template.py | {
"start": 916,
"end": 6579
} | class ____:
layout_a: str
layout_b: str
layout_c: str
datatype_a: str
datatype_b: str
datatype_c: str
tile_m: int
tile_n: int
tile_k: int
warp_m: int
warp_n: int
warp_k: int
warp_tile_m: int
warp_tile_n: int
warp_tile_k: int
m_is_padded: str
n_is_padded: str
k_is_padded: str
pipeline: str
scheduler: str
epilogue: str
def layout_repr(self):
return f"{self.layout_a[0]}{self.layout_b[0]}{self.layout_c[0]}"
def dtype_repr(self):
return f"{self.datatype_a}{self.datatype_b}{self.datatype_c}"
def tile_sizes(self):
return "_".join(
[
f"{self.tile_m}{self.tile_n}{self.tile_k}",
f"{self.warp_m}{self.warp_n}{self.warp_k}",
f"{self.warp_tile_m}{self.warp_tile_n}{self.warp_tile_k}",
]
)
def name(self):
return "ck_tile_gemm_universal_" + "_".join(
[
f"{self.layout_repr()}",
f"{self.dtype_repr()}",
f"{self.tile_sizes()}",
f"{self.pipeline}",
f"{self.scheduler}",
f"{self.epilogue}",
]
)
def dict_items(self):
return asdict(self).items()
@functools.cache
def ops():
"""
Generate the supported instance dataclasses
"""
import itertools
compute_v3_instances = [
CKTileGemmOperation(
layout_a=layout_a,
layout_b=layout_b,
layout_c=layout_c,
datatype_a=datatype_a,
datatype_b=datatype_b,
datatype_c=datatype_c,
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
warp_m=warp_m,
warp_n=warp_n,
warp_k=warp_k,
warp_tile_m=warp_tile_m,
warp_tile_n=warp_tile_n,
warp_tile_k=warp_tile_k,
m_is_padded=m_is_padded,
n_is_padded=n_is_padded,
k_is_padded=k_is_padded,
pipeline="CompV3",
scheduler="Intrawave",
epilogue=epilogue,
)
for (layout_a, layout_b, layout_c) in [
("Row", "Row", "Row"),
("Row", "Col", "Row"),
]
for (datatype_a, datatype_b, datatype_c) in [("FP16",) * 3, ("BF16",) * 3]
for (tile_m, tile_n, tile_k) in [(256, 256, 32), (256, 256, 64)]
for (warp_m, warp_n, warp_k) in [(2, 2, 1)]
for (warp_tile_m, warp_tile_n, warp_tile_k) in [(32, 32, 16)]
for m_is_padded in ["true", "false"]
for n_is_padded in ["true", "false"]
for k_is_padded in ["true", "false"]
for epilogue in ["Default", "CShuffle"]
]
compute_v4_instances = [
CKTileGemmOperation(
layout_a=layout_a,
layout_b=layout_b,
layout_c=layout_c,
datatype_a=datatype_a,
datatype_b=datatype_b,
datatype_c=datatype_c,
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
warp_m=warp_m,
warp_n=warp_n,
warp_k=warp_k,
warp_tile_m=warp_tile_m,
warp_tile_n=warp_tile_n,
warp_tile_k=warp_tile_k,
m_is_padded=m_is_padded,
n_is_padded=n_is_padded,
k_is_padded=k_is_padded,
pipeline="CompV4",
scheduler="Intrawave",
epilogue=epilogue,
)
for (layout_a, layout_b, layout_c) in [
("Row", "Row", "Row"),
("Row", "Col", "Row"),
]
for (datatype_a, datatype_b, datatype_c) in [("FP16",) * 3, ("BF16",) * 3]
for (tile_m, tile_n, tile_k) in [
(256, 256, 32)
] # half the tile size since it has double buffering
for (warp_m, warp_n, warp_k) in [(2, 2, 1)]
for (warp_tile_m, warp_tile_n, warp_tile_k) in [(32, 32, 16)]
for m_is_padded in ["true", "false"]
for n_is_padded in ["true", "false"]
for k_is_padded in ["true", "false"]
for epilogue in ["Default", "CShuffle"]
]
mem_instances = [
CKTileGemmOperation(
layout_a=layout_a,
layout_b=layout_b,
layout_c=layout_c,
datatype_a=datatype_a,
datatype_b=datatype_b,
datatype_c=datatype_c,
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
warp_m=warp_m,
warp_n=warp_n,
warp_k=warp_k,
warp_tile_m=warp_tile_m,
warp_tile_n=warp_tile_n,
warp_tile_k=warp_tile_k,
m_is_padded=m_is_padded,
n_is_padded=n_is_padded,
k_is_padded=k_is_padded,
pipeline="Mem",
scheduler=scheduler,
epilogue=epilogue,
)
for (layout_a, layout_b, layout_c) in [
("Row", "Row", "Row"),
("Row", "Col", "Row"),
]
for (datatype_a, datatype_b, datatype_c) in [("FP16",) * 3, ("BF16",) * 3]
for (tile_m, tile_n, tile_k) in [(256, 256, 32), (256, 256, 64)]
for (warp_m, warp_n, warp_k) in [(2, 2, 1)]
for (warp_tile_m, warp_tile_n, warp_tile_k) in [(32, 32, 16)]
for m_is_padded in ["true", "false"]
for n_is_padded in ["true", "false"]
for k_is_padded in ["true", "false"]
for scheduler in ["Intrawave", "Interwave"]
for epilogue in ["Default", "CShuffle"]
]
return list(
itertools.chain(compute_v3_instances, compute_v4_instances, mem_instances)
)
| CKTileGemmOperation |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 214080,
"end": 214242
} | class ____(Protocol[Input, Output]):
def __call__(
self, _in: Input, /, *, config: RunnableConfig
) -> Awaitable[Output]: ...
| _RunnableCallableAsync |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-signnow/llama_index/tools/signnow/base.py | {
"start": 2114,
"end": 5403
} | class ____(BaseToolSpec):
"""
Thin wrapper over McpToolSpec:
- creates BasicMCPClient for STDIO spawn,
- dynamically pulls tools from SignNow MCP server,
- sugar factories: from_env.
See McpToolSpec.to_tool_list() / .to_tool_list_async() for getting FunctionTool.
"""
# Follow BaseToolSpec typing contract
spec_functions: List[Union[str, Tuple[str, str]]] = []
def __init__(
self,
client: ClientSession,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> None:
self._mcp_spec = McpToolSpec(
client=client,
allowed_tools=allowed_tools,
include_resources=include_resources,
)
@classmethod
def from_env(
cls,
*,
allowed_tools: Optional[Iterable[str]] = None,
include_resources: bool = False,
env_overrides: Optional[Mapping[str, str]] = None,
bin: Optional[str] = None,
cmd: str = "serve",
args: Optional[Sequence[str]] = None,
require_in_path: bool = True,
) -> "SignNowMCPToolSpec":
"""
Spawn STDIO: 'sn-mcp serve' with provided environment overrides merged
on top of the current process environment.
Supported variables (see server README):
SIGNNOW_TOKEN (token-based auth)
OR
SIGNNOW_USER_EMAIL, SIGNNOW_PASSWORD, SIGNNOW_API_BASIC_TOKEN (credential-based auth)
SIGNNOW_APP_BASE, SIGNNOW_API_BASE (optional, defaults can be used)
Parameters
----------
- bin: binary/command to spawn (default None → uses SIGNNOW_MCP_BIN or 'sn-mcp')
- cmd: subcommand (default 'serve')
- args: additional arguments for the server
- require_in_path: validate presence of binary in PATH if not absolute
"""
# Build env and filter to expected keys
env_all = _merge_env(env_overrides)
filtered = {k: v for k, v in env_all.items() if k in EXPECTED_SIGNNOW_KEYS}
_validate_auth(filtered)
# Resolve binary to absolute if possible
resolved_bin = _resolve_sn_mcp_bin(bin, require_in_path=require_in_path)
cmd_args: List[str] = [cmd]
if args:
cmd_args.extend(args)
client = BasicMCPClient(resolved_bin, args=cmd_args, env=filtered)
return cls(
client=client,
allowed_tools=list(allowed_tools) if allowed_tools else None,
include_resources=include_resources,
)
async def to_tool_list_async(self) -> List[FunctionTool]:
"""Delegate to underlying `McpToolSpec` with error handling."""
result = await self._mcp_spec.to_tool_list_async()
return cast(List[FunctionTool], result)
def to_tool_list(
self,
spec_functions: Optional[List[Union[str, Tuple[str, str]]]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Delegate to underlying `McpToolSpec` (sync) with error handling."""
# We discover tools dynamically via MCP; provided parameters are ignored.
result = self._mcp_spec.to_tool_list()
return cast(List[FunctionTool], result)
| SignNowMCPToolSpec |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 14268,
"end": 14479
} | class ____(AbstractGenericGetTestCase):
def wait(self, timeout):
g = gevent.spawn(gevent.sleep, 10)
try:
return g.get(timeout=timeout)
finally:
g.kill()
| TestGet |
python | pytorch__pytorch | test/distributed/tensor/test_attention.py | {
"start": 2989,
"end": 15869
} | class ____(DTensorTestBase):
@property
def world_size(self) -> int:
return torch.cuda.device_count()
@property
def destroy_pg_upon_exit(self) -> bool:
return False
@skip_if_lt_x_gpu(2)
@skipIfRocm # Missing _c10d_functional_autograd::all_to_all_single
@unittest.skipIf(
not PLATFORM_SUPPORTS_FUSED_ATTENTION,
"Does not support flash nor efficient attention",
)
@with_comms
def test_ring_attention_sdpa(self) -> None:
self.run_subtests(
{
"is_causal": [True, False],
"compiled": [True, False],
"backend": backends,
"load_balance": [True, False],
"rotater": [_RotateMethod.ALL_TO_ALL, _RotateMethod.ALL_GATHER],
"test_forward_only": [True, False],
"use_context": [True, False],
},
self._test_ring_attention_sdpa,
)
def _ring_attention_sdpa(
self,
cp_q: torch.Tensor,
cp_k: torch.Tensor,
cp_v: torch.Tensor,
*,
fn_eval: Callable,
mesh: DeviceMesh,
seq_dim: int,
is_causal: bool,
compiled: bool,
backend: SDPBackend,
rotater: _RotateMethod,
test_forward_only: bool,
load_balance: bool,
use_context: bool,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
if not use_context:
cp_plan = _ContextParallel(
seq_dim=seq_dim,
attention_type=_ContextParallel.AttentionType.SDPA,
)
attention = SDPAWrapper(compiled=compiled, backend=backend)
attention = parallelize_module(attention, mesh, cp_plan)
if load_balance:
seq_len = cp_q.size(seq_dim)
load_balancer = _HeadTailLoadBalancer(seq_len, mesh.size(), cp_q.device)
else:
load_balancer = None
cp_q, cp_k, cp_v = _context_parallel_shard(
mesh, (cp_q, cp_k, cp_v), (seq_dim,) * 3, load_balancer=load_balancer
)
_enable_context_parallel_dispatcher()
else:
# Theoretically, context_parallel() should not be used to shard
# parameters because when require_grad is True, resize_ is not
# allowed. But requires_grad of cp_q, cp_k, and cp_v are False
# now. So we can just use context_parallel() to shard q, k, v.
# In reality, context_parallel() should be used to shard the input.
# In reality, context_parallel() should only be used to shard
# the model inputs (batch).
_cp_options.enable_load_balance = load_balance
cp_context = context_parallel(
mesh, buffers=(cp_q, cp_k, cp_v), buffer_seq_dims=(seq_dim,) * 3
)
cp_context.__enter__()
# NOTE: This demonstrates that monkey patching is not fully reliable.
# If we use SDPAWrapper directly, the monkey patching dispatch mode
# does not function correctly. To ensure proper behavior,
# F.scaled_dot_product_attention must be referenced within the
# context_parallel() scope.
attention = F.scaled_dot_product_attention
if compiled:
attention = torch.compile(
attention, fullgraph=True, backend="aot_eager"
)
for target in [cp_q, cp_k, cp_v]:
target.requires_grad = True
with CommDebugMode() as comm_mode:
with sdpa_kernel(backend):
cp_out = fn_eval(
attention,
cp_q,
cp_k,
cp_v,
is_causal=is_causal,
)
if not compiled and rotater == _RotateMethod.ALL_TO_ALL:
# Compiler and CommDebugMode do not work well together.
expect_all2all_count = (
self.world_size - 1
if test_forward_only
else self.world_size * 3 - 2
)
self.assertDictEqual(
comm_mode.get_comm_counts(),
{c10d_functional.all_to_all_single: expect_all2all_count},
)
cp_dq, cp_dk, cp_dv = cp_q.grad, cp_k.grad, cp_v.grad
for target in [cp_q, cp_k, cp_v]:
target.requires_grad = False
if not use_context:
_disable_context_parallel_dispatcher()
else:
cp_context.__exit__(None, None, None)
return cp_out, cp_dq, cp_dk, cp_dv
def _test_ring_attention_sdpa(
self,
is_causal: bool,
compiled: bool,
backend: SDPBackend,
load_balance: bool,
rotater: _RotateMethod,
test_forward_only: bool,
use_context: bool,
) -> None:
def fn_eval(fn, *args, **kwargs):
if test_forward_only:
with torch.no_grad():
return fn(*args, **kwargs)
else:
out = fn(*args, **kwargs)
out.sum().backward()
return out
if load_balance and not is_causal:
return
set_rotate_method(rotater_enum_to_str[rotater])
self.assertEqual(_cp_options.rotate_method, rotater)
device_mesh = DeviceMesh(self.device_type, torch.arange(0, self.world_size))
dtype = torch.bfloat16
bs = 8
seq_length = 1024
seq_dim = 2
dim = 32
nheads = 8
torch.manual_seed(10)
dtype = (
torch.bfloat16
if backend == SDPBackend.FLASH_ATTENTION
or backend == SDPBackend.CUDNN_ATTENTION
else torch.float32
)
q, k, v = [
torch.rand(
(bs, nheads, seq_length * self.world_size, dim),
device=self.device_type,
dtype=dtype,
requires_grad=True,
)
for _ in range(3)
]
# Ensure all ranks have the same initialization data.
with torch.no_grad():
dist.broadcast(q, src=0)
dist.broadcast(k, src=0)
dist.broadcast(v, src=0)
with sdpa_kernel(backend):
out = fn_eval(F.scaled_dot_product_attention, q, k, v, is_causal=is_causal)
cp_q, cp_k, cp_v = [target.detach().clone() for target in [q, k, v]]
cp_out, cp_dq, cp_dk, cp_dv = self._ring_attention_sdpa(
cp_q,
cp_k,
cp_v,
fn_eval=fn_eval,
mesh=device_mesh,
seq_dim=seq_dim,
is_causal=is_causal,
compiled=compiled,
backend=backend,
rotater=rotater,
test_forward_only=test_forward_only,
load_balance=load_balance,
use_context=use_context,
)
# Due to numerical error, we need to choose different atol for different
# attention kernels
(cp_out,) = context_parallel_unshard(device_mesh, [cp_out], [seq_dim])
atol = (
2e-06
if backend == SDPBackend.EFFICIENT_ATTENTION
else 8e-3 * self.world_size
)
rtol = (
1e-05
if backend == SDPBackend.EFFICIENT_ATTENTION
else 1e-3 * self.world_size
)
torch.testing.assert_close(out, cp_out, atol=atol, rtol=rtol)
if test_forward_only:
return
cp_dq, cp_dk, cp_dv = context_parallel_unshard(
device_mesh,
[cp_dq, cp_dk, cp_dv],
[seq_dim] * 3,
)
torch.testing.assert_close(q.grad, cp_dq, atol=atol, rtol=rtol)
torch.testing.assert_close(k.grad, cp_dk, atol=atol, rtol=rtol)
torch.testing.assert_close(v.grad, cp_dv, atol=atol, rtol=rtol)
def test_is_causal_behavior(self) -> None:
_cp_options.enable_load_balance = False
self.assertEqual(
_is_causal_behavior(rank=0, world_size=4, i=0, is_causal=False),
_CausalBehavior.NOT_IS_CAUSAL,
)
ranks = [
[_CausalBehavior.IS_CAUSAL, _CausalBehavior.SKIP],
[_CausalBehavior.IS_CAUSAL, _CausalBehavior.NOT_IS_CAUSAL],
]
for rank, iters in enumerate(ranks):
for i, behavior in enumerate(iters):
self.assertEqual(
_is_causal_behavior(rank=rank, world_size=2, i=i, is_causal=True),
behavior,
)
_cp_options.enable_load_balance = True
ranks = [
[_CausalBehavior.IS_CAUSAL, _CausalBehavior.NOT_IS_CAUSAL],
[_CausalBehavior.IS_CAUSAL, _CausalBehavior.NOT_IS_CAUSAL],
]
for rank, iters in enumerate(ranks):
for i, behavior in enumerate(iters):
self.assertEqual(
_is_causal_behavior(rank=rank, world_size=2, i=i, is_causal=True),
behavior,
)
# Compile the flex_attention function
compiled_flex_attention = torch.compile(flex_attention, dynamic=False, fullgraph=True)
compiled_create_block_mask = torch.compile(
create_block_mask, dynamic=False, fullgraph=True
)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
# copied from https://github.com/meta-pytorch/attention-gym/blob/main/attn_gym/masks/document_mask.py
def generate_random_lengths(total_length, num_documents) -> list[int]:
# Initialize all lengths to 1 to ensure each document has at least one token
lengths = [1] * num_documents
remaining_length = total_length - num_documents
# Randomly distribute the remaining length
for _ in range(remaining_length):
index = random.randint(0, num_documents - 1)
lengths[index] += 1
return lengths
def generate_random_lengths_in_chunks(
total_length, num_documents, chunk_size
) -> list[int]:
# Generate a list of random document lengths so that each document contains
# some number of chunks of size `chunk_size`. This means each document's length
# must be a multiple of `chunk_size`. Besides, the lengths of all the documents
# sum up to `total_length`.
num_chunks = total_length // chunk_size
assert total_length % chunk_size == 0 and num_chunks >= num_documents
num_chunks_per_document = [1] * num_documents
remaining_chunks = num_chunks - num_documents
# Randomly distribute the remaining chunks
for _ in range(remaining_chunks):
index = random.randint(0, num_documents - 1) # document_id
num_chunks_per_document[index] += 1
return [num_chunks * chunk_size for num_chunks in num_chunks_per_document]
def length_to_offsets(lengths: list[list[int]], device: str | torch.device) -> Tensor:
"""Converts a list of lengths to a list of offsets.
Args:
lengths: A list of lengths.
"""
offsets = [[0] + lengths_in_batch for lengths_in_batch in lengths]
offsets = torch.tensor(offsets, device=device, dtype=torch.int32)
offsets = torch.cumsum(offsets, dim=-1)
return offsets
def _offsets_to_doc_ids_tensor(offsets):
doc_ids = []
device = offsets.device
for batch_idx in range(offsets.size(0)):
counts = offsets[batch_idx][1:] - offsets[batch_idx][:-1]
doc_id = torch.repeat_interleave(
torch.arange(len(counts), device=device, dtype=torch.int32), counts
)
doc_ids.append(doc_id)
return torch.stack(doc_ids)
def generate_doc_mask_mod(
mask_mod: _mask_mod_signature, offsets: Tensor
) -> _mask_mod_signature:
"""Generates mask mods that apply to inputs to flex attention in the sequence stacked
format.
Args:
mask_mod: The mask mod to apply to the documents
offsets: This tensor should be of shape(num_documents + 1)
this should contain the cumulative counts of document tokens.
e.g. if you have 3 documents of length 2, 4, 3 then
offsets = [0, 2, 6, 9]
Note:
What is the sequence stacked format? When assembling batches of inputs, we
take multiple sequences and stack them together to form 1 large sequence. We then
use masking to ensure that the attention scores are only applied to tokens within
the same document.
"""
document_id = _offsets_to_doc_ids_tensor(offsets)
def doc_mask_mod(b, h, q_idx, kv_idx):
same_doc = document_id[b][q_idx] == document_id[b][kv_idx]
q_logical = q_idx - offsets[b, document_id[b, q_idx]]
kv_logical = kv_idx - offsets[b, document_id[b, kv_idx]]
inner_mask = mask_mod(b, h, q_logical, kv_logical)
return same_doc & inner_mask
return doc_mask_mod
| RingAttentionTest |
python | openai__openai-python | src/openai/types/shared/response_format_text.py | {
"start": 195,
"end": 326
} | class ____(BaseModel):
type: Literal["text"]
"""The type of response format being defined. Always `text`."""
| ResponseFormatText |
python | sqlalchemy__sqlalchemy | examples/inheritance/concrete.py | {
"start": 724,
"end": 1008
} | class ____(Base):
__tablename__ = "company"
id: Mapped[intpk]
name: Mapped[str50]
employees: Mapped[list[Person]] = relationship(
back_populates="company", cascade="all, delete-orphan"
)
def __repr__(self):
return f"Company {self.name}"
| Company |
python | django__django | django/core/cache/backends/base.py | {
"start": 257,
"end": 322
} | class ____(ImproperlyConfigured):
pass
| InvalidCacheBackendError |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 28245,
"end": 39121
} | class ____(Request):
"""
Create a new model not associated with a task
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks.
Default is false.
:type ready: bool
:param public: Create a public model Default is false.
:type public: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
"""
_service = "models"
_action = "create"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive.Should be identical to the framework of the task which created the model.",
"type": "string",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"public": {
"default": False,
"description": "Create a public model Default is false.",
"type": "boolean",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks. Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"required": ["uri", "name"],
"type": "object",
}
def __init__(
self,
uri: str,
name: str,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = False,
public: Optional[bool] = False,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.public = public
self.project = project
self.parent = parent
self.task = task
self.metadata = metadata
@schema_property("uri")
def uri(self) -> str:
return self._property_uri
@uri.setter
def uri(self, value: str) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("public")
def public(self) -> Optional[bool]:
return self._property_public
@public.setter
def public(self, value: Optional[bool]) -> None:
if value is None:
self._property_public = None
return
self.assert_isinstance(value, "public", (bool,))
self._property_public = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
| CreateRequest |
python | spyder-ide__spyder | spyder/widgets/helperwidgets.py | {
"start": 19142,
"end": 21692
} | class ____(QTableView):
"""QTableView subclass that can highlight an entire row when hovered."""
sig_hover_index_changed = Signal(object)
"""
This is emitted when the index that is currently hovered has changed.
Parameters
----------
index: object
QModelIndex that has changed on hover.
"""
def __init__(self, parent, custom_delegate=False):
QTableView.__init__(self, parent)
# For mouseMoveEvent
self.setMouseTracking(True)
# To remove background color for the hovered row when the mouse is not
# over the widget.
css = qstylizer.style.StyleSheet()
css["QTableView::item"].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_1
)
self._stylesheet = css.toString()
if not custom_delegate:
self._set_delegate()
# ---- Qt methods
def mouseMoveEvent(self, event):
self._inform_hover_index_changed(event)
def wheelEvent(self, event):
super().wheelEvent(event)
self._inform_hover_index_changed(event)
def leaveEvent(self, event):
super().leaveEvent(event)
self.setStyleSheet(self._stylesheet)
def enterEvent(self, event):
super().enterEvent(event)
self.setStyleSheet("")
# ---- Private methods
def _inform_hover_index_changed(self, event):
index = self.indexAt(event.pos())
if index.isValid():
self.sig_hover_index_changed.emit(index)
self.viewport().update()
def _set_delegate(self):
"""
Set a custom item delegate that can highlight the current row when
hovered.
"""
class HoverRowDelegate(QItemDelegate):
def __init__(self, parent):
super().__init__(parent)
self._hovered_row = -1
def on_hover_index_changed(self, index):
self._hovered_row = index.row()
def paint(self, painter, option, index):
# This paints the entire row associated to the delegate when
# it's hovered.
if index.row() == self._hovered_row:
painter.fillRect(
option.rect, QColor(SpyderPalette.COLOR_BACKGROUND_3)
)
super().paint(painter, option, index)
self.setItemDelegate(HoverRowDelegate(self))
self.sig_hover_index_changed.connect(
self.itemDelegate().on_hover_index_changed
)
| HoverRowsTableView |
python | docker__docker-py | tests/unit/context_test.py | {
"start": 180,
"end": 1624
} | class ____(unittest.TestCase):
@pytest.mark.skipif(
IS_WINDOWS_PLATFORM, reason='Linux specific path check'
)
def test_url_compatibility_on_linux(self):
c = Context("test")
assert c.Host == DEFAULT_UNIX_SOCKET[5:]
@pytest.mark.skipif(
not IS_WINDOWS_PLATFORM, reason='Windows specific path check'
)
def test_url_compatibility_on_windows(self):
c = Context("test")
assert c.Host == DEFAULT_NPIPE
def test_fail_on_default_context_create(self):
with pytest.raises(docker.errors.ContextException):
ContextAPI.create_context("default")
def test_default_in_context_list(self):
found = False
ctx = ContextAPI.contexts()
for c in ctx:
if c.Name == "default":
found = True
assert found is True
def test_get_current_context(self):
assert ContextAPI.get_current_context().Name == "default"
def test_https_host(self):
c = Context("test", host="tcp://testdomain:8080", tls=True)
assert c.Host == "https://testdomain:8080"
def test_context_inspect_without_params(self):
ctx = ContextAPI.inspect_context()
assert ctx["Name"] == "default"
assert ctx["Metadata"]["StackOrchestrator"] == "swarm"
assert ctx["Endpoints"]["docker"]["Host"] in (
DEFAULT_NPIPE,
DEFAULT_UNIX_SOCKET[5:],
)
| BaseContextTest |
python | openai__openai-python | tests/api_resources/responses/test_input_tokens.py | {
"start": 395,
"end": 2711
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_count(self, client: OpenAI) -> None:
input_token = client.responses.input_tokens.count()
assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
@parametrize
def test_method_count_with_all_params(self, client: OpenAI) -> None:
input_token = client.responses.input_tokens.count(
conversation="string",
input="string",
instructions="instructions",
model="model",
parallel_tool_calls=True,
previous_response_id="resp_123",
reasoning={
"effort": "none",
"generate_summary": "auto",
"summary": "auto",
},
text={
"format": {"type": "text"},
"verbosity": "low",
},
tool_choice="none",
tools=[
{
"name": "name",
"parameters": {"foo": "bar"},
"strict": True,
"type": "function",
"description": "description",
}
],
truncation="auto",
)
assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
@parametrize
def test_raw_response_count(self, client: OpenAI) -> None:
response = client.responses.input_tokens.with_raw_response.count()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
input_token = response.parse()
assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
@parametrize
def test_streaming_response_count(self, client: OpenAI) -> None:
with client.responses.input_tokens.with_streaming_response.count() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
input_token = response.parse()
assert_matches_type(InputTokenCountResponse, input_token, path=["response"])
assert cast(Any, response.is_closed) is True
| TestInputTokens |
python | walkccc__LeetCode | solutions/2488. Count Subarrays With Median K/2488.py | {
"start": 0,
"end": 665
} | class ____:
def countSubarrays(self, nums: list[int], k: int) -> int:
INDEX = nums.index(k)
ans = 0
count = collections.Counter()
balance = 0
for i in range(INDEX, -1, -1):
if nums[i] < k:
balance -= 1
elif nums[i] > k:
balance += 1
count[balance] += 1
balance = 0
for i in range(INDEX, len(nums)):
if nums[i] < k:
balance -= 1
elif nums[i] > k:
balance += 1
# The subarray that has balance == 0 or 1 having median equal to k.
# So, add count[0 - balance] and count[1 - balance] to `ans`.
ans += count[-balance] + count[1 - balance]
return ans
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 53400,
"end": 53994
} | class ____(Blockwise):
_parameters = ["frame"]
operation = staticmethod(_check_divisions)
_preserves_partitioning_information = True
@functools.cached_property
def _meta(self):
return self.frame._meta
def _task(self, name: Key, index: int) -> Task:
args = [self._blockwise_arg(op, index) for op in self._args]
args = args + [
index,
self.divisions[index],
self.divisions[index + 1],
index == (self.npartitions - 1),
]
return Task(name, self.operation, *args)
| EnforceRuntimeDivisions |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 81527,
"end": 82356
} | class ____(Response):
"""
Response of datasets.create endpoint.
:param id: ID of the dataset
:type id: str
"""
_service = "datasets"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "ID of the dataset", "type": ["string", "null"]}
},
"type": "object",
}
def __init__(self, id=None, **kwargs):
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateResponse |
python | django__django | tests/auth_tests/test_remote_user.py | {
"start": 16838,
"end": 17501
} | class ____(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split("@")[0]
def configure_user(self, request, user, created=True):
"""
Sets user's email address using the email specified in an HTTP header.
Sets user's last name for existing users.
"""
user.email = request.META.get(RemoteUserTest.email_header, "")
if not created:
user.last_name = user.username
user.save()
return user
| CustomRemoteUserBackend |
python | ansible__ansible | packaging/release.py | {
"start": 4436,
"end": 9169
} | class ____:
"""
Simple command line framework inspired by nox.
Argument parsing is handled by argparse. Each function annotated with an instance of this class becomes a subcommand.
Options are shared across all commands, and are defined by providing kwargs when creating an instance of this class.
Options are only defined for commands which have a matching parameter.
The name of each kwarg is the option name, which will be prefixed with `--` and with underscores converted to dashes.
The value of each kwarg is passed as kwargs to ArgumentParser.add_argument. Passing None results in an internal only parameter.
The following custom kwargs are recognized and are not passed to add_argument:
name - Override the positional argument (option) passed to add_argument.
exclusive - Put the argument in an exclusive group of the given name.
"""
def __init__(self, **kwargs: dict[str, t.Any] | None) -> None:
self.commands: list[t.Callable[..., None]] = []
self.arguments = kwargs
self.parsed_arguments: argparse.Namespace | None = None
def __call__[T: t.Callable[..., None]](self, func: T) -> T:
"""Register the decorated function as a CLI command."""
self.commands.append(func)
return func
def run(self, *args: t.Callable[..., None], **kwargs) -> None:
"""Run the specified command(s), using any provided internal args."""
for arg in args:
self._run(arg, **kwargs)
def main(self) -> None:
"""Main program entry point."""
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(metavar="COMMAND", required=True)
for func in self.commands:
func_parser = subparsers.add_parser(self._format_command_name(func), description=func.__doc__, help=func.__doc__)
func_parser.set_defaults(func=func)
exclusive_groups = {}
signature = inspect.signature(func)
for name in signature.parameters:
if name not in self.arguments:
raise RuntimeError(f"The '{name}' argument, used by '{func.__name__}', has not been defined.")
if (arguments := self.arguments.get(name)) is None:
continue # internal use
arguments = arguments.copy()
exclusive = arguments.pop("exclusive", None)
# noinspection PyProtectedMember, PyUnresolvedReferences
command_parser: argparse._ActionsContainer
if exclusive:
if exclusive not in exclusive_groups:
exclusive_groups[exclusive] = func_parser.add_mutually_exclusive_group()
command_parser = exclusive_groups[exclusive]
else:
command_parser = func_parser
if option_name := arguments.pop("name", None):
arguments.update(dest=name)
else:
option_name = f"--{name.replace('_', '-')}"
command_parser.add_argument(option_name, **arguments)
try:
# noinspection PyUnresolvedReferences
import argcomplete
except ImportError:
pass
else:
argcomplete.autocomplete(parser)
self.parsed_arguments = parser.parse_args()
try:
self.run(self.parsed_arguments.func)
except ApplicationError as ex:
display.fatal(ex)
sys.exit(1)
def _run(self, func: t.Callable[..., None], **kwargs) -> None:
"""Run the specified command, using any provided internal args."""
signature = inspect.signature(func)
func_args = {name: getattr(self.parsed_arguments, name) for name in signature.parameters if hasattr(self.parsed_arguments, name)}
func_args.update({name: value for name, value in kwargs.items() if name in signature.parameters})
printable_args = ", ".join(f"{name}={repr(value)}" for name, value in func_args.items())
label = f"{self._format_command_name(func)}({printable_args})"
display.show(f"==> {label}", color=Display.BLUE)
try:
func(**func_args)
except BaseException:
display.show(f"!!! {label}", color=Display.RED)
raise
display.show(f"<== {label}", color=Display.BLUE)
@staticmethod
def _format_command_name(func: t.Callable[..., None]) -> str:
"""Return the friendly name of the given command."""
return func.__name__.replace("_", "-")
display = Display()
# endregion
# region Data Classes
@dataclasses.dataclass(frozen=True)
| CommandFramework |
python | python__mypy | mypy/applytype.py | {
"start": 7575,
"end": 12032
} | class ____(TypeTranslator):
"""Make free type variables generic in the type if possible.
See docstring for apply_poly() for details.
"""
def __init__(
self,
poly_tvars: Iterable[TypeVarLikeType],
bound_tvars: frozenset[TypeVarLikeType] = frozenset(),
seen_aliases: frozenset[TypeInfo] = frozenset(),
) -> None:
super().__init__()
self.poly_tvars = set(poly_tvars)
# This is a simplified version of TypeVarScope used during semantic analysis.
self.bound_tvars = bound_tvars
self.seen_aliases = seen_aliases
def collect_vars(self, t: CallableType | Parameters) -> list[TypeVarLikeType]:
found_vars = []
for arg in t.arg_types:
for tv in get_all_type_vars(arg):
if isinstance(tv, ParamSpecType):
normalized: TypeVarLikeType = tv.copy_modified(
flavor=ParamSpecFlavor.BARE, prefix=Parameters([], [], [])
)
else:
normalized = tv
if normalized in self.poly_tvars and normalized not in self.bound_tvars:
found_vars.append(normalized)
return remove_dups(found_vars)
def visit_callable_type(self, t: CallableType) -> Type:
found_vars = self.collect_vars(t)
self.bound_tvars |= set(found_vars)
result = super().visit_callable_type(t)
self.bound_tvars -= set(found_vars)
assert isinstance(result, ProperType) and isinstance(result, CallableType)
result.variables = result.variables + tuple(found_vars)
return result
def visit_type_var(self, t: TypeVarType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_type_var(t)
def visit_param_spec(self, t: ParamSpecType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_param_spec(t)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> Type:
if t in self.poly_tvars and t not in self.bound_tvars:
raise PolyTranslationError()
return super().visit_type_var_tuple(t)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
if not t.args:
return t.copy_modified()
if not t.is_recursive:
return get_proper_type(t).accept(self)
# We can't handle polymorphic application for recursive generic aliases
# without risking an infinite recursion, just give up for now.
raise PolyTranslationError()
def visit_instance(self, t: Instance) -> Type:
if t.type.has_param_spec_type:
# We need this special-casing to preserve the possibility to store a
# generic function in an instance type. Things like
# forall T . Foo[[x: T], T]
# are not really expressible in current type system, but this looks like
# a useful feature, so let's keep it.
param_spec_index = next(
i for (i, tv) in enumerate(t.type.defn.type_vars) if isinstance(tv, ParamSpecType)
)
p = get_proper_type(t.args[param_spec_index])
if isinstance(p, Parameters):
found_vars = self.collect_vars(p)
self.bound_tvars |= set(found_vars)
new_args = [a.accept(self) for a in t.args]
self.bound_tvars -= set(found_vars)
repl = new_args[param_spec_index]
assert isinstance(repl, ProperType) and isinstance(repl, Parameters)
repl.variables = list(repl.variables) + list(found_vars)
return t.copy_modified(args=new_args)
# There is the same problem with callback protocols as with aliases
# (callback protocols are essentially more flexible aliases to callables).
if t.args and t.type.is_protocol and t.type.protocol_members == ["__call__"]:
if t.type in self.seen_aliases:
raise PolyTranslationError()
call = mypy.subtypes.find_member("__call__", t, t, is_operator=True)
assert call is not None
return call.accept(
PolyTranslator(self.poly_tvars, self.bound_tvars, self.seen_aliases | {t.type})
)
return super().visit_instance(t)
| PolyTranslator |
python | sympy__sympy | sympy/tensor/array/dense_ndim_array.py | {
"start": 3658,
"end": 4719
} | class ____(DenseNDimArray, ImmutableNDimArray): # type: ignore
def __new__(cls, iterable, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
shape = Tuple(*map(_sympify, shape))
cls._check_special_bounds(flat_list, shape)
flat_list = flatten(flat_list)
flat_list = Tuple(*flat_list)
self = Basic.__new__(cls, flat_list, shape, **kwargs)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape, 1)
return self
def __setitem__(self, index, value):
raise TypeError('immutable N-dim array')
def as_mutable(self):
return MutableDenseNDimArray(self)
def _eval_simplify(self, **kwargs):
from sympy.simplify.simplify import simplify
return self.applyfunc(simplify)
| ImmutableDenseNDimArray |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 91893,
"end": 97698
} | class ____(QueryTest):
__dialect__ = "default"
__sparse_driver_backend__ = True
def test_first(self):
User = self.classes.User
assert User(id=7) == fixture_session().query(User).first()
assert (
fixture_session().query(User).filter(User.id == 27).first() is None
)
def test_negative_indexes_raise(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).order_by(User.id)
with expect_raises_message(
IndexError,
"negative indexes are not accepted by SQL index / slice operators",
):
q[-5:-2]
with expect_raises_message(
IndexError,
"negative indexes are not accepted by SQL index / slice operators",
):
q[-1]
with expect_raises_message(
IndexError,
"negative indexes are not accepted by SQL index / slice operators",
):
q[-5]
with expect_raises_message(
IndexError,
"negative indexes are not accepted by SQL index / slice operators",
):
q[:-2]
# this doesn't evaluate anything because it's a net-negative
eq_(q[-2:-5], [])
def test_limit_offset_applies(self):
"""Test that the expected LIMIT/OFFSET is applied for slices.
The LIMIT/OFFSET syntax differs slightly on all databases, and
query[x:y] executes immediately, so we are asserting against
SQL strings using sqlite's syntax.
"""
User = self.classes.User
sess = fixture_session()
q = sess.query(User).order_by(User.id)
self.assert_sql(
testing.db,
lambda: q[10:20],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 10, "param_2": 10},
)
],
)
self.assert_sql(
testing.db,
lambda: q[:20],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT :param_1",
{"param_1": 20},
)
],
)
self.assert_sql(
testing.db,
lambda: q[5:],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT -1 OFFSET :param_1",
{"param_1": 5},
)
],
)
self.assert_sql(testing.db, lambda: q[2:2], [])
self.assert_sql(testing.db, lambda: q[-2:-5], [])
self.assert_sql(
testing.db,
lambda: q[:],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id",
{},
)
],
)
@testing.requires.sql_expression_limit_offset
def test_first_against_expression_offset(self):
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.order_by(User.id)
.offset(literal_column("2") + literal_column("3"))
)
self.assert_sql(
testing.db,
q.first,
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET 2 + 3",
[{"param_1": 1}],
)
],
)
@testing.requires.sql_expression_limit_offset
def test_full_slice_against_expression_offset(self):
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.order_by(User.id)
.offset(literal_column("2") + literal_column("3"))
)
self.assert_sql(
testing.db,
lambda: q[2:5],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET 2 + 3 + :param_2",
[{"param_1": 3, "param_2": 2}],
)
],
)
def test_full_slice_against_integer_offset(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).order_by(User.id).offset(2)
self.assert_sql(
testing.db,
lambda: q[2:5],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET :param_2",
[{"param_1": 3, "param_2": 4}],
)
],
)
@testing.requires.sql_expression_limit_offset
def test_start_slice_against_expression_offset(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User).order_by(User.id).offset(literal_column("2"))
self.assert_sql(
testing.db,
lambda: q[2:],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT -1 OFFSET 2 + :2_1",
[{"2_1": 2}],
)
],
)
| SliceTest |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_container.py | {
"start": 14474,
"end": 15349
} | class ____:
def test_valid(self) -> None:
prop = bcpc.Len(bcpc.List(Int), 2)
assert prop.is_valid([0, 1])
def test_invalid(self) -> None:
prop = bcpc.Len(bcpc.List(Int), 2)
assert not prop.is_valid([])
assert not prop.is_valid([0])
assert not prop.is_valid([0, 1, 2])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpc, ALL)
| Test_Len |
python | keras-team__keras | keras/src/utils/tracking.py | {
"start": 769,
"end": 4589
} | class ____:
"""Attribute tracker, used for e.g. Variable tracking.
Monitors certain attribute types
and put them in appropriate lists in case of a match.
Also passively tracks certain mutable collections
(dict, list) so that items added to them later
still get tracked. This is done by wrapping these
collections into an equivalent, tracking-aware object.
Example:
```python
def __init__(self):
self.tracker = Tracker(
# Format: `name: (test_fn, store)`
{
"variables":
(lambda x: isinstance(x, Variable), self._variables),
"metrics": (lambda x: isinstance(x, Metric), self._metrics),
"layers": (lambda x: isinstance(x, Layer), self._layers),
}
)
def __setattr__(self, name, value):
if hasattr(self, "_tracker"):
value = self._tracker.track(value)
return super().__setattr__(name, value)
```
"""
def __init__(self, config, exclusions=None):
self.config = config
self.stored_ids = {name: set() for name in self.config.keys()}
self.locked = False
self._lock_violation_msg = None
self.exclusions = exclusions or {}
def track(self, attr):
if not is_tracking_enabled():
return attr
for store_name, (is_attr_type, _) in self.config.items():
if is_attr_type(attr):
if store_name in self.exclusions:
for excl in self.exclusions[store_name]:
if self.is_in_store(excl, attr):
return attr
if not self.is_in_store(store_name, attr):
self.add_to_store(store_name, attr)
return attr
if isinstance(attr, tuple) and hasattr(attr, "_fields"):
# Named tuple case.
wrapped_attr = {}
for name, e in attr._asdict().items():
wrapped_attr[name] = self.track(e)
return attr.__class__(**wrapped_attr)
if isinstance(attr, tuple):
wrapped_attr = []
for e in attr:
wrapped_attr.append(self.track(e))
return attr.__class__(wrapped_attr)
elif isinstance(attr, list):
return TrackedList(attr, self)
elif isinstance(attr, dict):
# TODO: OrderedDict?
return TrackedDict(attr, self)
elif isinstance(attr, set):
return TrackedSet(attr, self)
return attr
def untrack(self, value):
for store_name in self.stored_ids.keys():
if id(value) in self.stored_ids[store_name]:
self.stored_ids[store_name].remove(id(value))
python_utils.remove_by_id(self.config[store_name][1], value)
def lock(self, msg=None):
self.locked = True
if msg is not None:
self._lock_violation_msg = msg
def unlock(self):
self.locked = False
def add_to_store(self, store_name, value):
if self.locked:
raise ValueError(self._lock_violation_msg)
self.config[store_name][1].append(value)
self.stored_ids[store_name].add(id(value))
def is_in_store(self, store_name, value):
return id(value) in self.stored_ids[store_name]
def replace_tracked_value(self, store_name, old_value, new_value):
if not self.is_in_store(store_name, old_value):
raise ValueError(f"Unknown value: {old_value}")
store_list = self.config[store_name][1]
index = store_list.index(old_value)
store_list[index] = new_value
self.stored_ids[store_name].remove(id(old_value))
self.stored_ids[store_name].add(id(new_value))
@tree.register_tree_node_class
| Tracker |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py | {
"start": 203080,
"end": 213604
} | class ____(quantize_model_test_base.QuantizedModelTest):
def _run_model_in_sess(self, model_dir, tags, signature_key, sample_inputs):
with tensorflow.compat.v1.Session(graph=tensorflow.Graph()) as sess:
meta_graph = saved_model_loader.load(sess, tags, export_dir=model_dir)
signature_def = meta_graph.signature_def[signature_key]
# DumpTensorOp only works in graph mode.
# Execute the model using session to run DumpTensorOp.
output_tensor_names = [
output_tensor_info.name
for output_tensor_info in signature_def.outputs.values()
]
output_values = []
for sample_input in sample_inputs:
feed_dict = {}
for input_key, input_value in sample_input.items():
input_tensor_name = signature_def.inputs[input_key].name
feed_dict[input_tensor_name] = input_value
# Obtain the output of the model.
output_values.append(
sess.run(output_tensor_names, feed_dict=feed_dict)[0]
)
return output_values
def _read_tensor_array_file(self, file_path):
tensor_protos = []
for raw_record in tf_record.tf_record_iterator(file_path, options='ZLIB'):
tensor_protos.append(
tensorflow.make_ndarray(tensor_pb2.TensorProto.FromString(raw_record))
)
return np.array(tensor_protos)
@parameterized.named_parameters(
{
'testcase_name': 'none',
'activation_fn': None,
'has_bias': False,
},
{
'testcase_name': 'relu',
'activation_fn': nn_ops.relu,
'has_bias': False,
},
{
'testcase_name': 'with_bias',
'activation_fn': None,
'has_bias': True,
},
{
'testcase_name': 'with_bias_and_relu',
'activation_fn': nn_ops.relu,
'has_bias': True,
},
)
def test_conv2d_ptq_model_whole_model_verify(self, activation_fn, has_bias):
input_shape = [None, None, None, 3]
filter_shape = [2, 3, 3, 2]
model = self._create_conv2d_model(
input_shape,
filter_shape,
activation_fn=activation_fn,
has_bias=has_bias,
)
saved_model_save.save(model, self._input_saved_model_path)
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(8):
yield {
'input_tensor': ops.convert_to_tensor(
np.random.uniform(low=0, high=150, size=(1, 3, 4, 3)).astype(
'f4'
)
),
}
tags = {tag_constants.SERVING}
unquantized_dump_model_path = self.create_tempdir().full_path
log_dir_path = self.create_tempdir().full_path
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
op_set=quant_opts_pb2.XLA,
debugger_config=_DebuggerConfig(
debugger_type=_DebuggerConfig.DebuggerType.DEBUGGER_TYPE_WHOLE_MODEL,
unquantized_dump_model_path=unquantized_dump_model_path,
log_dir_path=log_dir_path,
),
tags=tags,
signature_keys=['serving_default'],
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen(),
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
sample_inputs = [
{'input_tensor': np.random.uniform(low=0, high=1, size=(16, 3, 4, 3))},
{'input_tensor': np.random.uniform(low=0, high=1, size=(16, 3, 4, 3))},
]
# Check if output of the model and value saved by DumpTensorOp matches.
# Verify for both unquantized model and quantized model.
for model_path, file_name in [
[unquantized_dump_model_path, 'unquantized_tensor_data.pb'],
[self._output_saved_model_path, 'quantized_tensor_data.pb'],
]:
output_values = self._run_model_in_sess(
model_path, tags, 'serving_default', sample_inputs
)
# Find the dump file and parse it.
folder = os.path.join(log_dir_path, os.listdir(log_dir_path)[0])
dump_file_path = os.path.join(log_dir_path, folder, file_name)
dump_file_numpy = self._read_tensor_array_file(dump_file_path)
# Since the model only has one conv2d and its output is directly used as
# the output of the model, output of the model and conv2d's dump value
# should be the same.
self.assertAllClose(output_values, dump_file_numpy)
# Verify if quant_unit.pb file was created correctly.
quant_unit_file_path = os.path.join(log_dir_path, folder, 'quant_unit.pb')
quant_unit = (
quant_opts_pb2.UnitWiseQuantizationSpec.QuantizationUnit.FromString(
open(quant_unit_file_path, 'rb').read()
)
)
self.assertEqual(quant_unit.node_name, 'Conv2D')
self.assertRegex(quant_unit.func_name, r'^__inference_conv_\d+')
@parameterized.parameters(
testing.parameter_combinations([{
'activation_fn': [None, nn_ops.relu, nn_ops.relu6],
'has_bias': [True, False],
'debugger_type': [
_DebuggerConfig.DEBUGGER_TYPE_INT_PER_LAYER,
_DebuggerConfig.DEBUGGER_TYPE_FLOAT_PER_LAYER,
],
'target_opset': [quant_opts_pb2.XLA, quant_opts_pb2.STABLEHLO],
}])
)
def test_conv2d_ptq_model_per_layer_verify(
self,
activation_fn: Optional[ops.Operation],
has_bias: bool,
debugger_type: _DebuggerConfig.DebuggerType,
target_opset: quant_opts_pb2.OpSet,
):
# TODO: b/326114903 - Support dynamic input dimensions after 0th rank in
# op_set=STABLEHLO.
input_shape_dynamic = target_opset != quant_opts_pb2.STABLEHLO
concrete_input_shape = [None, 3, 4, 3]
input_shape = (
[None, None, None, 3] if input_shape_dynamic else concrete_input_shape
)
filter_shape = [2, 3, 3, 2]
model = self._create_conv2d_model(
input_shape,
filter_shape,
activation_fn=activation_fn,
has_bias=has_bias,
)
saved_model_save.save(model, self._input_saved_model_path)
def data_gen() -> repr_dataset.RepresentativeDataset:
data_input_size = [1] + concrete_input_shape[1:]
for _ in range(8):
yield {
'input_tensor': ops.convert_to_tensor(
np.random.uniform(low=0, high=150, size=data_input_size).astype(
'f4'
)
),
}
tags = {tag_constants.SERVING}
log_dir_path = self.create_tempdir().full_path
quantization_options = quant_opts_pb2.QuantizationOptions(
quantization_method=quant_opts_pb2.QuantizationMethod(
preset_method=_PresetMethod.METHOD_STATIC_RANGE_INT8
),
op_set=target_opset,
debugger_config=_DebuggerConfig(
debugger_type=debugger_type,
log_dir_path=log_dir_path,
),
tags=tags,
signature_keys=['serving_default'],
)
converted_model = quantize_model.quantize(
self._input_saved_model_path,
self._output_saved_model_path,
quantization_options,
representative_dataset=data_gen(),
)
self.assertIsNotNone(converted_model)
self.assertCountEqual(
converted_model.signatures._signatures.keys(), {'serving_default'}
)
sample_input_size = [16] + concrete_input_shape[1:]
sample_inputs = [
{
'input_tensor': np.random.uniform(
low=0, high=1, size=sample_input_size
)
},
{
'input_tensor': np.random.uniform(
low=0, high=1, size=sample_input_size
)
},
]
output_value_from_original_model = self._run_model_in_sess(
self._input_saved_model_path, tags, 'serving_default', sample_inputs
)
output_value_from_debugging_model = self._run_model_in_sess(
self._output_saved_model_path, tags, 'serving_default', sample_inputs
)
# Find the both quantized and unquantized dump file.
folder = os.path.join(log_dir_path, os.listdir(log_dir_path)[0])
unquantized_dump_file_path = os.path.join(
log_dir_path, folder, 'unquantized_tensor_data.pb'
)
quantized_dump_file_path = os.path.join(
log_dir_path, folder, 'quantized_tensor_data.pb'
)
unquantized_dump_file_numpy = self._read_tensor_array_file(
unquantized_dump_file_path
)
quantized_dump_file_numpy = self._read_tensor_array_file(
quantized_dump_file_path
)
# Since the model only has one conv2d and its output is directly used as
# the output of the model, output of the model and conv2d's dump value
# should be the same.
self.assertAllClose(
output_value_from_original_model, unquantized_dump_file_numpy
)
# The output_value_from_debugging_model of DEBUGGER_TYPE_INT_PER_LAYER is
# a quantized value, while for DEBUGGER_TYPE_FLOAT_PER_LAYER, it's an
# unquantized value. Therefore there are different verifications for the
# output value.
if debugger_type == _DebuggerConfig.DEBUGGER_TYPE_INT_PER_LAYER:
self.assertAllClose(
output_value_from_debugging_model, quantized_dump_file_numpy
)
else: # debugger_type == _DebuggerConfig.DEBUGGER_TYPE_FLOAT_PER_LAYER:
self.assertAllClose(
output_value_from_debugging_model, output_value_from_original_model
)
# Verify if quant_unit.pb file was created correctly.
quant_unit_file_path = os.path.join(log_dir_path, folder, 'quant_unit.pb')
quant_unit = (
quant_opts_pb2.UnitWiseQuantizationSpec.QuantizationUnit.FromString(
open(quant_unit_file_path, 'rb').read()
)
)
if target_opset == quant_opts_pb2.XLA:
self.assertEqual(quant_unit.node_name, 'Conv2D')
self.assertRegex(quant_unit.func_name, r'^__inference_conv_\d+')
elif target_opset == quant_opts_pb2.STABLEHLO:
self.assertEqual(quant_unit.node_name, '_empty_node')
self.assertRegex(
quant_unit.func_name, r'^composite_conv_([a-zA-Z_0-9]+_)*fn_\d+'
)
else:
assert False, f'Please add assertion for the op_set: {target_opset}.'
@test_util.run_all_in_graph_and_eager_modes
| DebuggerTest |
python | allegroai__clearml | clearml/storage/callbacks.py | {
"start": 4972,
"end": 6892
} | class ____(ProgressReport):
def __init__(
self,
filename: str,
verbose: bool,
total_size: float,
log: logging.Logger,
report_chunk_size_mb: Optional[int] = None,
report_start: Optional[bool] = None,
) -> None:
report_chunk_size_mb = (
report_chunk_size_mb
if report_chunk_size_mb is not None
else ProgressReport.report_upload_chunk_size_mb
or int(config.get("storage.log.report_upload_chunk_size_mb", 5))
)
super(UploadProgressReport, self).__init__(
verbose,
total_size,
log,
report_chunk_size_mb,
description_prefix="Uploading",
description_suffix="from {}".format(filename),
report_start=report_start,
)
self._filename = filename
@classmethod
def from_stream(
cls,
stream: IO[AnyStr],
filename: str,
verbose: bool,
log: logging.Logger,
) -> Optional["UploadProgressReport"]:
if hasattr(stream, "seek"):
total_size_mb = cls._get_stream_length(stream) // (1024 * 1024)
return UploadProgressReport(filename, verbose, total_size_mb, log)
@classmethod
def from_file(cls, filename: str, verbose: bool, log: logging.Logger) -> "UploadProgressReport":
total_size_mb = float(os.path.getsize(filename)) / (1024.0 * 1024.0)
return UploadProgressReport(filename, verbose, total_size_mb, log)
@staticmethod
def _get_stream_length(stream: IO[AnyStr]) -> int:
current_position = stream.tell()
# seek to end of file
stream.seek(0, 2)
total_length = stream.tell()
# seek back to current position to support
# partially read file-like objects
stream.seek(current_position or 0)
return total_length
| UploadProgressReport |
python | mwaskom__seaborn | seaborn/axisgrid.py | {
"start": 12712,
"end": 43514
} | class ____(Grid):
"""Multi-plot grid for plotting conditional relationships."""
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None,
):
super().__init__()
data = handle_data_source(data)
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(col_names) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
# TODO this doesn't account for axis labels
figsize = (ncol * height * aspect, nrow * height)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# --- Initialize the subplot grid
with _disable_autolayout():
fig = plt.figure(figsize=figsize)
if col_wrap is None:
kwargs = dict(squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
axes = fig.subplots(nrow, ncol, **kwargs)
if col is None and row is None:
axes_dict = {}
elif col is None:
axes_dict = dict(zip(row_names, axes.flat))
elif row is None:
axes_dict = dict(zip(col_names, axes.flat))
else:
facet_product = product(row_names, col_names)
axes_dict = dict(zip(facet_product, axes.flat))
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
axes_dict = dict(zip(col_names, axes))
# --- Set up the class attributes
# Attributes that are part of the public API but accessed through
# a property so that Sphinx adds them to the auto class doc
self._figure = fig
self._axes = axes
self._axes_dict = axes_dict
self._legend = None
# Public attributes that aren't explicitly documented
# (It's not obvious that having them be public was a good idea)
self.data = data
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._margin_titles_texts = []
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend_data = {}
self._x_var = None
self._y_var = None
self._sharex = sharex
self._sharey = sharey
self._dropna = dropna
self._not_na = not_na
# --- Make the axes look good
self.set_titles()
self.tight_layout()
if despine:
self.despine()
if sharex in [True, 'col']:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
if sharey in [True, 'row']:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
This class maps a dataset onto multiple axes arrayed in a grid of rows
and columns that correspond to *levels* of variables in the dataset.
The plots it produces are often called "lattice", "trellis", or
"small-multiple" graphics.
It can also represent levels of a third variable with the ``hue``
parameter, which plots different subsets of data in different colors.
This uses color to resolve elements on a third dimension, but only
draws subsets on top of each other and will not tailor the ``hue``
parameter for the specific visualization the way that axes-level
functions that accept ``hue`` will.
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
.. warning::
When using seaborn functions that infer semantic mappings from a
dataset, care must be taken to synchronize those mappings across
facets (e.g., by defining the ``hue`` mapping with a palette dict or
setting the data type of the variables to ``category``). In most cases,
it will be better to use a figure-level function (e.g. :func:`relplot`
or :func:`catplot`) than to use :class:`FacetGrid` directly.
See the :ref:`tutorial <grid_tutorial>` for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``{{var}}_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{height}
{aspect}
{palette}
{{row,col,hue}}_order : lists
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict
Dictionary of keyword arguments passed to
:class:`matplotlib.gridspec.GridSpec`
(via :meth:`matplotlib.figure.Figure.subplots`).
Ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships
relplot : Combine a relational plot and a :class:`FacetGrid`
displot : Combine a distribution plot and a :class:`FacetGrid`
catplot : Combine a categorical plot and a :class:`FacetGrid`
lmplot : Combine a regression plot and a :class:`FacetGrid`
Examples
--------
.. note::
These examples use seaborn functions to demonstrate some of the
advanced features of the class, but in most cases you will want
to use figue-level functions (e.g. :func:`displot`, :func:`relplot`)
to make the plots shown here.
.. include:: ../docstrings/FacetGrid.rst
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# How we use the function depends on where it comes from
func_module = str(getattr(func, "__module__", ""))
# Check for categorical plots without order information
if func_module == "seaborn.categorical":
if "order" not in kwargs:
warning = ("Using the {} function without specifying "
"`order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
if len(args) == 3 and "hue_order" not in kwargs:
warning = ("Using the {} function without specifying "
"`hue_order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not func_module.startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.items()]
# Some matplotlib functions don't handle pandas objects correctly
if func_module.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
def map_dataframe(self, func, *args, **kwargs):
"""Like ``.map`` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not str(func.__module__).startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# For axis labels, prefer to use positional args for backcompat
# but also extract the x/y kwargs and use if no corresponding arg
axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
for i, val in enumerate(args[:2]):
axis_labels[i] = val
self._finalize_grid(axis_labels)
return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self._figure, **kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [label.get_text() for label in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [label.get_text() for label in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
def set_titles(self, template=None, row_template=None, col_template=None, **kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
    """Draw reference line(s) on every facet in the grid.

    Parameters
    ----------
    x, y : numeric
        Position(s) of the vertical (``x``) and/or horizontal (``y``) line.
    color : :mod:`matplotlib color <matplotlib.colors>`
        Color of the reference line(s); pass ``color=None`` to fall back to
        ``hue`` mapping.
    linestyle : str
        Line style for the reference line(s).
    line_kws : key, value mappings
        Additional keyword arguments, forwarded to
        :meth:`matplotlib.axes.Axes.axvline` (when ``x`` is given) and
        :meth:`matplotlib.axes.Axes.axhline` (when ``y`` is given).

    Returns
    -------
    :class:`FacetGrid` instance
        Returns ``self`` for easy method chaining.

    """
    # Fold the explicit styling options into the kwargs passed through.
    line_kws.update(color=color, linestyle=linestyle)
    if x is not None:
        self.map(plt.axvline, x=x, **line_kws)
    if y is not None:
        self.map(plt.axhline, y=y, **line_kws)
    return self
# ------ Properties that are part of the public API and documented by Sphinx
@property
def axes(self):
    """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
    # Read-only view over the private ``_axes`` attribute (presumably
    # populated when the subplot grid is created — not visible here).
    return self._axes
@property
def ax(self):
    """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
    # Only a 1x1 grid has a single unambiguous Axes to hand back.
    if self.axes.shape != (1, 1):
        raise AttributeError(
            "Use the `.axes` attribute when facet variables are assigned."
        )
    return self.axes[0, 0]
@property
def axes_dict(self):
    """A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.

    If only one of ``row`` or ``col`` is assigned, each key is a string
    representing a level of that variable. If both facet dimensions are
    assigned, each key is a ``({row_level}, {col_level})`` tuple.
    """
    # Read-only view over the private mapping built elsewhere in the class.
    return self._axes_dict
# ------ Private properties, that require some computation to get
@property
def _inner_axes(self):
    """Return a flat array of the inner axes."""
    if self._col_wrap is None:
        # Regular 2D grid: drop the bottom row and the left column.
        return self.axes[:-1, 1:].flat
    # With col_wrap the grid is a flat sequence; an axes is "inner" when it
    # is not in the left column and some axes sits directly below it (the
    # incomplete final row leaves n_empty positions unfilled).
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    selected = [
        ax
        for i, ax in enumerate(self.axes)
        if i % self._ncol
        and i < last_row_start
        and i < last_row_start - n_empty
    ]
    return np.array(selected, object).flat
@property
def _left_axes(self):
    """Return a flat array of the left column of axes."""
    if self._col_wrap is None:
        return self.axes[:, 0].flat
    # Wrapped grid: the left column holds every ncol-th axes.
    picked = [ax for i, ax in enumerate(self.axes) if i % self._ncol == 0]
    return np.array(picked, object).flat
@property
def _not_left_axes(self):
    """Return a flat array of axes that aren't on the left column."""
    if self._col_wrap is None:
        return self.axes[:, 1:].flat
    # Wrapped grid: everything whose index is not a multiple of ncol.
    picked = [ax for i, ax in enumerate(self.axes) if i % self._ncol]
    return np.array(picked, object).flat
@property
def _bottom_axes(self):
    """Return a flat array of the bottom row of axes."""
    if self._col_wrap is None:
        return self.axes[-1, :].flat
    # With col_wrap, an axes is at the "bottom" of its column either when
    # it is in the last row, or when the incomplete final row leaves the
    # position below it empty (n_empty unfilled slots).
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    picked = [
        ax
        for i, ax in enumerate(self.axes)
        if i >= last_row_start or i >= last_row_start - n_empty
    ]
    return np.array(picked, object).flat
@property
def _not_bottom_axes(self):
    """Return a flat array of axes that aren't on the bottom row."""
    if self._col_wrap is None:
        return self.axes[:-1, :].flat
    # Complement of _bottom_axes: keep axes that have another axes
    # directly beneath them in the wrapped grid.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    picked = [
        ax
        for i, ax in enumerate(self.axes)
        if i < last_row_start and i < last_row_start - n_empty
    ]
    return np.array(picked, object).flat
| FacetGrid |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/shrinker.py | {
"start": 3140,
"end": 73872
} | class ____:
"""A shrinker is a child object of a ConjectureRunner which is designed to
manage the associated state of a particular shrink problem. That is, we
have some initial ConjectureData object and some property of interest
that it satisfies, and we want to find a ConjectureData object with a
shortlex (see sort_key above) smaller choice sequence that exhibits the same
property.
Currently the only property of interest we use is that the status is
INTERESTING and the interesting_origin takes on some fixed value, but we
may potentially be interested in other use cases later.
However we assume that data with a status < VALID never satisfies the predicate.
The shrinker keeps track of a value shrink_target which represents the
current best known ConjectureData object satisfying the predicate.
It refines this value by repeatedly running *shrink passes*, which are
methods that perform a series of transformations to the current shrink_target
and evaluate the underlying test function to find new ConjectureData
objects. If any of these satisfy the predicate, the shrink_target
is updated automatically. Shrinking runs until no shrink pass can
improve the shrink_target, at which point it stops. It may also be
terminated if the underlying engine throws RunIsComplete, but that
is handled by the calling code rather than the Shrinker.
=======================
Designing Shrink Passes
=======================
Generally a shrink pass is just any function that calls
cached_test_function and/or consider_new_nodes a number of times,
but there are a couple of useful things to bear in mind.
A shrink pass *makes progress* if running it changes self.shrink_target
(i.e. it tries a shortlex smaller ConjectureData object satisfying
the predicate). The desired end state of shrinking is to find a
value such that no shrink pass can make progress, i.e. that we
are at a local minimum for each shrink pass.
In aid of this goal, the main invariant that a shrink pass must
satisfy is that whether it makes progress must be deterministic.
It is fine (encouraged even) for the specific progress it makes
to be non-deterministic, but if you run a shrink pass, it makes
no progress, and then you immediately run it again, it should
never succeed on the second time. This allows us to stop as soon
as we have run each shrink pass and seen no progress on any of
them.
This means that e.g. it's fine to try each of N deletions
or replacements in a random order, but it's not OK to try N random
deletions (unless you have already shrunk at least once, though we
don't currently take advantage of this loophole).
Shrink passes need to be written so as to be robust against
change in the underlying shrink target. It is generally safe
to assume that the shrink target does not change prior to the
point of first modification - e.g. if you change no bytes at
index ``i``, all spans whose start is ``<= i`` still exist,
as do all blocks, and the data object is still of length
``>= i + 1``. This can only be violated by bad user code which
relies on an external source of non-determinism.
When the underlying shrink_target changes, shrink
passes should not run substantially more test_function calls
on success than they do on failure. Say, no more than a constant
factor more. In particular shrink passes should not iterate to a
fixed point.
This means that shrink passes are often written with loops that
are carefully designed to do the right thing in the case that no
shrinks occurred and try to adapt to any changes to do a reasonable
job. e.g. say we wanted to write a shrink pass that tried deleting
each individual choice (this isn't an especially good pass,
but it leads to a simple illustrative example), we might do it
by iterating over the choice sequence like so:
.. code-block:: python
i = 0
while i < len(self.shrink_target.nodes):
if not self.consider_new_nodes(
self.shrink_target.nodes[:i] + self.shrink_target.nodes[i + 1 :]
):
i += 1
The reason for writing the loop this way is that i is always a
valid index into the current choice sequence, even if the current sequence
changes as a result of our actions. When the choice sequence changes,
we leave the index where it is rather than restarting from the
beginning, and carry on. This means that the number of steps we
run in this case is always bounded above by the number of steps
we would run if nothing works.
Another thing to bear in mind about shrink pass design is that
they should prioritise *progress*. If you have N operations that
you need to run, you should try to order them in such a way as
to avoid stalling, where you have long periods of test function
invocations where no shrinks happen. This is bad because whenever
we shrink we reduce the amount of work the shrinker has to do
in future, and often speed up the test function, so we ideally
want those shrinks to happen much earlier in the process.
Sometimes stalls are inevitable of course - e.g. if the pass
makes no progress, then the entire thing is just one long stall,
but it's helpful to design it so that stalls are less likely
in typical behaviour.
The two easiest ways to do this are:
* Just run the N steps in random order. As long as a
reasonably large proportion of the operations succeed, this
guarantees the expected stall length is quite short. The
book keeping for making sure this does the right thing when
it succeeds can be quite annoying.
* When you have any sort of nested loop, loop in such a way
that both loop variables change each time. This prevents
stalls which occur when one particular value for the outer
loop is impossible to make progress on, rendering the entire
inner loop into a stall.
However, although progress is good, too much progress can be
a bad sign! If you're *only* seeing successful reductions,
that's probably a sign that you are making changes that are
too timid. Two useful things to offset this:
* It's worth writing shrink passes which are *adaptive*, in
the sense that when operations seem to be working really
well we try to bundle multiple of them together. This can
often be used to turn what would be O(m) successful calls
into O(log(m)).
* It's often worth trying one or two special minimal values
before trying anything more fine grained (e.g. replacing
the whole thing with zero).
"""
def derived_value(fn):
    """Expose ``fn`` as a lazily-computed, cached read-only property.

    The value is computed at most once and stored in
    ``self.__derived_values`` under the function's name; per the class's
    caching scheme that store is presumably cleared when the shrink target
    changes, so the value is recomputed on next access.
    """

    def accept(self):
        name = fn.__name__
        cache = self.__derived_values
        if name not in cache:
            cache[name] = fn(self)
        return cache[name]

    accept.__name__ = fn.__name__
    return property(accept)
def __init__(
    self,
    engine: "ConjectureRunner",
    initial: ConjectureData | ConjectureResult,
    predicate: ShrinkPredicateT | None,
    *,
    allow_transition: (
        Callable[[ConjectureData | ConjectureResult, ConjectureData], bool] | None
    ),
    explain: bool,
    in_target_phase: bool = False,
):
    """Create a shrinker for a particular engine, with a given starting
    point and predicate. When shrink() is called it will attempt to find an
    example for which predicate is True and which is strictly smaller than
    initial.

    Note that initial is a ConjectureData object, and predicate
    takes ConjectureData objects.
    """
    # At least one acceptance criterion must be supplied.
    assert predicate is not None or allow_transition is not None
    self.engine = engine
    # Missing predicate/transition default to "accept everything".
    self.__predicate = predicate or (lambda data: True)
    self.__allow_transition = allow_transition or (lambda source, destination: True)
    # Backing store for @derived_value cached properties.
    self.__derived_values: dict = {}

    self.initial_size = len(initial.choices)
    # We keep track of the current best example on the shrink_target
    # attribute.
    self.shrink_target = initial
    self.clear_change_tracking()
    self.shrinks = 0

    # We terminate shrinks that seem to have reached their logical
    # conclusion: If we've called the underlying test function at
    # least self.max_stall times since the last time we shrunk,
    # it's time to stop shrinking.
    self.max_stall = 200
    self.initial_calls = self.engine.call_count
    self.initial_misaligned = self.engine.misaligned_count
    self.calls_at_last_shrink = self.initial_calls

    # Pass order matters only as a starting point; fixate_shrink_passes
    # re-sorts this list by observed usefulness as it runs.
    self.shrink_passes: list[ShrinkPass] = [
        ShrinkPass(self.try_trivial_spans),
        self.node_program("X" * 5),
        self.node_program("X" * 4),
        self.node_program("X" * 3),
        self.node_program("X" * 2),
        self.node_program("X" * 1),
        ShrinkPass(self.pass_to_descendant),
        ShrinkPass(self.reorder_spans),
        ShrinkPass(self.minimize_duplicated_choices),
        ShrinkPass(self.minimize_individual_choices),
        ShrinkPass(self.redistribute_numeric_pairs),
        ShrinkPass(self.lower_integers_together),
        ShrinkPass(self.lower_duplicated_characters),
    ]

    # Because the shrinker is also used to `pareto_optimise` in the target phase,
    # we sometimes want to allow extending buffers instead of aborting at the end.
    self.__extend: Literal["full"] | int = "full" if in_target_phase else 0
    self.should_explain = explain
@derived_value  # type: ignore
def cached_calculations(self):
    # Fresh per-shrink-target memo table, used by the `cached` decorator.
    return {}
def cached(self, *keys):
    """Decorator factory: replace the decorated zero-argument function with
    its memoised result, keyed on ``(function name, *keys)`` in the
    per-shrink-target ``cached_calculations`` table."""

    def accept(f):
        table = self.cached_calculations
        cache_key = (f.__name__, *keys)
        if cache_key not in table:
            table[cache_key] = f()
        return table[cache_key]

    return accept
@property
def calls(self) -> int:
    """Return the number of calls that have been made to the underlying
    test function."""
    # Delegated to the engine, which owns the authoritative counter.
    return self.engine.call_count
@property
def misaligned(self) -> int:
    """Number of misaligned calls recorded by the engine."""
    return self.engine.misaligned_count
def check_calls(self) -> None:
    """Raise StopShrinking once max_stall calls have passed without a shrink."""
    stalled_for = self.calls - self.calls_at_last_shrink
    if stalled_for >= self.max_stall:
        raise StopShrinking
def cached_test_function(
    self, nodes: Sequence[ChoiceNode]
) -> tuple[bool, ConjectureResult | _Overrun | None]:
    """Run the test function on ``nodes``, returning ``(improved, result)``.

    ``improved`` is True when the attempt matches the current shrink target
    (trivially successful) or caused it to change. ``result`` is the
    engine's result, or None when the attempt was rejected without running
    the test function.
    """
    nodes = nodes[: len(self.nodes)]

    # After truncation, "starts with the target" means it *is* the target:
    # nothing to do, but report success.
    if startswith(nodes, self.nodes):
        return (True, None)

    # Never consider attempts that are shortlex-larger than the target.
    if sort_key(self.nodes) < sort_key(nodes):
        return (False, None)

    # sometimes our shrinking passes try obviously invalid things. We handle
    # discarding them in one place here.
    if any(not choice_permitted(node.value, node.constraints) for node in nodes):
        return (False, None)

    result = self.engine.cached_test_function(
        [n.value for n in nodes], extend=self.__extend
    )
    previous = self.shrink_target
    self.incorporate_test_data(result)
    # May raise StopShrinking if we've stalled for too long.
    self.check_calls()
    return (previous is not self.shrink_target, result)
def consider_new_nodes(self, nodes: Sequence[ChoiceNode]) -> bool:
    """True iff ``nodes`` was accepted (i.e. improved the shrink target)."""
    was_improvement, _result = self.cached_test_function(nodes)
    return was_improvement
def incorporate_test_data(self, data):
    """Takes a ConjectureData or Overrun object and updates the current
    shrink_target if this data represents an improvement over it."""
    # Invalid data can never satisfy the predicate; identical data is a no-op.
    if data.status < Status.VALID or data is self.shrink_target:
        return
    # Accept only data that satisfies the predicate, is shortlex-smaller
    # than the current target, and whose transition the caller permits.
    # (Short-circuit order is intentional: cheap/meaningful checks first.)
    if (
        self.__predicate(data)
        and sort_key(data.nodes) < sort_key(self.shrink_target.nodes)
        and self.__allow_transition(self.shrink_target, data)
    ):
        self.update_shrink_target(data)
def debug(self, msg: str) -> None:
    """Forward a debug message to the engine's debug log."""
    self.engine.debug(msg)
@property
def random(self) -> "Random":
    """The engine's shared source of randomness."""
    return self.engine.random
def shrink(self) -> None:
    """Run the full set of shrinks and update shrink_target.

    This method is "mostly idempotent" - calling it twice is unlikely to
    have any effect, though it has a non-zero probability of doing so.
    """
    try:
        self.initial_coarse_reduction()
        self.greedy_shrink()
    except StopShrinking:
        # If we stopped shrinking because we're making slow progress (instead of
        # reaching a local optimum), don't run the explain-phase logic.
        self.should_explain = False
    finally:
        if self.engine.report_debug_info:

            # Pluralisation helper for the report text below.
            def s(n):
                return "s" if n != 1 else ""

            total_deleted = self.initial_size - len(self.shrink_target.choices)
            calls = self.engine.call_count - self.initial_calls
            misaligned = self.engine.misaligned_count - self.initial_misaligned

            self.debug(
                "---------------------\n"
                "Shrink pass profiling\n"
                "---------------------\n\n"
                f"Shrinking made a total of {calls} call{s(calls)} of which "
                f"{self.shrinks} shrank and {misaligned} were misaligned. This "
                f"deleted {total_deleted} choices out of {self.initial_size}."
            )
            # Report passes that shrank at least once separately from the rest.
            for useful in [True, False]:
                self.debug("")
                if useful:
                    self.debug("Useful passes:")
                else:
                    self.debug("Useless passes:")
                self.debug("")
                for pass_ in sorted(
                    self.shrink_passes,
                    key=lambda t: (-t.calls, t.deletions, t.shrinks),
                ):
                    if pass_.calls == 0:
                        continue
                    if (pass_.shrinks != 0) != useful:
                        continue

                    self.debug(
                        f"  * {pass_.name} made {pass_.calls} call{s(pass_.calls)} of which "
                        f"{pass_.shrinks} shrank and {pass_.misaligned} were misaligned, "
                        f"deleting {pass_.deletions} choice{s(pass_.deletions)}."
                    )
            self.debug("")
        self.explain()
def explain(self) -> None:
    """Annotate the final example: for each recorded arg slice, try random
    replacements and record in ``shrink_target.slice_comments`` whether the
    test keeps failing regardless of the values in that slice."""
    if not self.should_explain or not self.shrink_target.arg_slices:
        return

    # Experiments below may make many calls; disable the stall limit.
    self.max_stall = 2**100
    shrink_target = self.shrink_target
    nodes = self.nodes
    choices = self.choices
    chunks: dict[tuple[int, int], list[tuple[ChoiceT, ...]]] = defaultdict(list)

    # Before we start running experiments, let's check for known inputs which would
    # make them redundant. The shrinking process means that we've already tried many
    # variations on the minimal example, so this can save a lot of time.
    seen_passing_seq = self.engine.passing_choice_sequences(
        prefix=self.nodes[: min(self.shrink_target.arg_slices)[0]]
    )

    # Now that we've shrunk to a minimal failing example, it's time to try
    # varying each part that we've noted will go in the final report. Consider
    # slices in largest-first order
    for start, end in sorted(
        self.shrink_target.arg_slices, key=lambda x: (-(x[1] - x[0]), x)
    ):
        # Check for any previous examples that match the prefix and suffix,
        # so we can skip if we found a passing example while shrinking.
        if any(
            startswith(seen, nodes[:start]) and endswith(seen, nodes[end:])
            for seen in seen_passing_seq
        ):
            continue

        # Run our experiments
        n_same_failures = 0
        note = "or any other generated value"
        # TODO: is 100 same-failures out of 500 attempts a good heuristic?
        for n_attempt in range(500):  # pragma: no branch
            # no-branch here because we don't coverage-test the abort-at-500 logic.

            if n_attempt - 10 > n_same_failures * 5:
                # stop early if we're seeing mostly invalid examples
                break  # pragma: no cover

            # replace start:end with random values
            replacement = []
            for i in range(start, end):
                node = nodes[i]
                if not node.was_forced:
                    value = draw_choice(
                        node.type, node.constraints, random=self.random
                    )
                    node = node.copy(with_value=value)
                replacement.append(node.value)

            attempt = choices[:start] + tuple(replacement) + choices[end:]
            result = self.engine.cached_test_function(attempt, extend="full")
            if result.status is Status.OVERRUN:
                continue  # pragma: no cover  # flakily covered
            result = cast(ConjectureResult, result)
            if not (
                len(attempt) == len(result.choices)
                and endswith(result.nodes, nodes[end:])
            ):
                # Turns out this was a variable-length part, so grab the infix...
                for span1, span2 in zip(
                    shrink_target.spans, result.spans, strict=False
                ):
                    assert span1.start == span2.start
                    assert span1.start <= start
                    assert span1.label == span2.label
                    if span1.start == start and span1.end == end:
                        result_end = span2.end
                        break
                else:
                    raise NotImplementedError("Expected matching prefixes")

                attempt = (
                    choices[:start]
                    + result.choices[start:result_end]
                    + choices[end:]
                )
                chunks[(start, end)].append(result.choices[start:result_end])
                result = self.engine.cached_test_function(attempt)
                if result.status is Status.OVERRUN:
                    continue  # pragma: no cover  # flakily covered
                result = cast(ConjectureResult, result)
            else:
                chunks[(start, end)].append(result.choices[start:end])

            if shrink_target is not self.shrink_target:  # pragma: no cover
                # If we've shrunk further without meaning to, bail out.
                self.shrink_target.slice_comments.clear()
                return
            if result.status is Status.VALID:
                # The test passed, indicating that this param can't vary freely.
                # However, it's really hard to write a simple and reliable covering
                # test, because of our `seen_passing_buffers` check above.
                break  # pragma: no cover
            if self.__predicate(result):  # pragma: no branch
                n_same_failures += 1
                if n_same_failures >= 100:
                    self.shrink_target.slice_comments[(start, end)] = note
                    break

    # Finally, if we've found multiple independently-variable parts, check whether
    # they can all be varied together.
    if len(self.shrink_target.slice_comments) <= 1:
        return
    n_same_failures_together = 0
    chunks_by_start_index = sorted(chunks.items())
    for _ in range(500):  # pragma: no branch
        # no-branch here because we don't coverage-test the abort-at-500 logic.
        new_choices: list[ChoiceT] = []
        prev_end = 0
        for (start, end), ls in chunks_by_start_index:
            assert prev_end <= start < end, "these chunks must be nonoverlapping"
            new_choices.extend(choices[prev_end:start])
            new_choices.extend(self.random.choice(ls))
            prev_end = end

        result = self.engine.cached_test_function(new_choices)

        # This *can't* be a shrink because none of the components were.
        assert shrink_target is self.shrink_target
        if result.status == Status.VALID:
            self.shrink_target.slice_comments[(0, 0)] = (
                "The test sometimes passed when commented parts were varied together."
            )
            break  # Test passed, this param can't vary freely.
        if self.__predicate(result):  # pragma: no branch
            n_same_failures_together += 1
            if n_same_failures_together >= 100:
                self.shrink_target.slice_comments[(0, 0)] = (
                    "The test always failed when commented parts were varied together."
                )
                break
def greedy_shrink(self) -> None:
    """Run the full set of greedy shrink passes until a fixed point.

    Greedy passes only ever move to strictly better targets, so this
    method is idempotent: calling it a second time has exactly the same
    effect as calling it once.
    """
    self.fixate_shrink_passes(self.shrink_passes)
def initial_coarse_reduction(self):
    """Run one-off preliminary reductions before the main shrink passes.

    These stay out of the regular pass list because they can make the test
    case locally "worse" — e.g. rerandomising part of it and significantly
    increasing individual node values — which works directly against the
    lexical shrinking passes and would frequently undo their work if
    repeated.
    """
    self.reduce_each_alternative()
@derived_value  # type: ignore
def spans_starting_at(self):
    """For each node index, the indices of the spans that start there."""
    buckets = [[] for _ in self.shrink_target.nodes]
    n = len(buckets)
    for span_index, span in enumerate(self.spans):
        # Zero-length spans can start at the very end; those have no bucket.
        if span.start < n:
            buckets[span.start].append(span_index)
    return tuple(tuple(bucket) for bucket in buckets)
def reduce_each_alternative(self):
    """This is a pass that is designed to rerandomise use of the
    one_of strategy or things that look like it, in order to try
    to move from later strategies to earlier ones in the branch
    order.

    It does this by trying to systematically lower each value it
    finds that looks like it might be the branch decision for
    one_of, and then attempts to repair any changes in shape that
    this causes.
    """
    i = 0
    # `while` rather than `for`: the node list may change under us as
    # successful shrinks replace the shrink target.
    while i < len(self.shrink_target.nodes):
        nodes = self.shrink_target.nodes
        node = nodes[i]
        # Small, unforced, zero-based integers are plausible branch indices.
        if (
            node.type == "integer"
            and not node.was_forced
            and node.value <= 10
            and node.constraints["min_value"] == 0
        ):
            assert isinstance(node.value, int)

            # We've found a plausible candidate for a ``one_of`` choice.
            # We now want to see if the shape of the test case actually depends
            # on it. If it doesn't, then we don't need to do this (comparatively
            # costly) pass, and can let much simpler lexicographic reduction
            # handle it later.
            #
            # We test this by trying to set the value to zero and seeing if the
            # shape changes, as measured by either changing the number of subsequent
            # nodes, or changing the nodes in such a way as to cause one of the
            # previous values to no longer be valid in its position.
            zero_attempt = self.cached_test_function(
                nodes[:i] + (nodes[i].copy(with_value=0),) + nodes[i + 1 :]
            )[1]
            if (
                zero_attempt is not self.shrink_target
                and zero_attempt is not None
                and zero_attempt.status >= Status.VALID
            ):
                changed_shape = len(zero_attempt.nodes) != len(nodes)

                if not changed_shape:
                    for j in range(i + 1, len(nodes)):
                        zero_node = zero_attempt.nodes[j]
                        orig_node = nodes[j]
                        if (
                            zero_node.type != orig_node.type
                            or not choice_permitted(
                                orig_node.value, zero_node.constraints
                            )
                        ):
                            changed_shape = True
                            break
                if changed_shape:
                    # Shape depends on this node: try each lower branch value.
                    for v in range(node.value):
                        if self.try_lower_node_as_alternative(i, v):
                            break
        i += 1
def try_lower_node_as_alternative(self, i, v):
    """Attempt to lower `self.shrink_target.nodes[i]` to `v`,
    while rerandomising and attempting to repair any subsequent
    changes to the shape of the test case that this causes.

    Returns True iff the shrink target improved.
    """
    nodes = self.shrink_target.nodes
    # First try the direct replacement; if it is accepted we're done.
    if self.consider_new_nodes(
        nodes[:i] + (nodes[i].copy(with_value=v),) + nodes[i + 1 :]
    ):
        return True

    prefix = nodes[:i] + (nodes[i].copy(with_value=v),)
    initial = self.shrink_target
    spans = self.spans_starting_at[i]
    # Try a few random completions of the lowered prefix, then attempt to
    # splice each span of the random attempt back into the original.
    for _ in range(3):
        random_attempt = self.engine.cached_test_function(
            [n.value for n in prefix], extend=len(nodes)
        )
        if random_attempt.status < Status.VALID:
            continue
        self.incorporate_test_data(random_attempt)
        for j in spans:
            initial_span = initial.spans[j]
            attempt_span = random_attempt.spans[j]
            contents = random_attempt.nodes[attempt_span.start : attempt_span.end]

            self.consider_new_nodes(
                nodes[:i] + contents + nodes[initial_span.end :]
            )
            if initial is not self.shrink_target:
                return True
    return False
@derived_value  # type: ignore
def shrink_pass_choice_trees(self) -> dict[Any, ChoiceTree]:
    # One ChoiceTree per shrink pass, tracking which steps have been
    # explored; rebuilt when the shrink target changes (derived_value).
    return defaultdict(ChoiceTree)
def step(self, shrink_pass: ShrinkPass, *, random_order: bool = False) -> bool:
    """Run one step of ``shrink_pass``.

    Returns False (doing nothing) once the pass's choice tree is
    exhausted. Pass statistics are updated even if the step raises.
    """
    tree = self.shrink_pass_choice_trees[shrink_pass]
    if tree.exhausted:
        return False

    # Snapshot counters so we can attribute this step's work to the pass.
    initial_shrinks = self.shrinks
    initial_calls = self.calls
    initial_misaligned = self.misaligned
    size = len(self.shrink_target.choices)
    assert shrink_pass.name is not None
    self.engine.explain_next_call_as(shrink_pass.name)

    if random_order:
        selection_order = random_selection_order(self.random)
    else:
        # Resume from where this pass left off last time.
        selection_order = prefix_selection_order(shrink_pass.last_prefix)

    try:
        shrink_pass.last_prefix = tree.step(
            selection_order,
            lambda chooser: shrink_pass.function(chooser),
        )
    finally:
        shrink_pass.calls += self.calls - initial_calls
        shrink_pass.misaligned += self.misaligned - initial_misaligned
        shrink_pass.shrinks += self.shrinks - initial_shrinks
        shrink_pass.deletions += size - len(self.shrink_target.choices)
        self.engine.clear_call_explanation()
    return True
def fixate_shrink_passes(self, passes: list[ShrinkPass]) -> None:
    """Run steps from each pass in ``passes`` until the current shrink target
    is a fixed point of all of them.

    Note: ``passes`` is re-sorted in place by observed usefulness.
    """
    any_ran = True
    while any_ran:
        any_ran = False
        reordering = {}

        # We run remove_discarded after every pass to do cleanup
        # keeping track of whether that actually works. Either there is
        # no discarded data and it is basically free, or it reliably works
        # and deletes data, or it doesn't work. In that latter case we turn
        # it off for the rest of this loop through the passes, but will
        # try again once all of the passes have been run.
        can_discard = self.remove_discarded()

        calls_at_loop_start = self.calls

        # We keep track of how many calls can be made by a single step
        # without making progress and use this to test how much to pad
        # out self.max_stall by as we go along.
        max_calls_per_failing_step = 1

        for sp in passes:
            if can_discard:
                can_discard = self.remove_discarded()

            before_sp = self.shrink_target

            # Run the shrink pass until it fails to make any progress
            # max_failures times in a row. This implicitly boosts shrink
            # passes that are more likely to work.
            failures = 0
            max_failures = 20
            while failures < max_failures:
                # We don't allow more than max_stall consecutive failures
                # to shrink, but this means that if we're unlucky and the
                # shrink passes are in a bad order where only the ones at
                # the end are useful, if we're not careful this heuristic
                # might stop us before we've tried everything. In order to
                # avoid that happening, we make sure that there's always
                # plenty of breathing room to make it through a single
                # iteration of the fixate_shrink_passes loop.
                self.max_stall = max(
                    self.max_stall,
                    2 * max_calls_per_failing_step
                    + (self.calls - calls_at_loop_start),
                )

                prev = self.shrink_target
                initial_calls = self.calls
                # It's better for us to run shrink passes in a deterministic
                # order, to avoid repeat work, but this can cause us to create
                # long stalls when there are a lot of steps which fail to do
                # anything useful. In order to avoid this, once we've noticed
                # we're in a stall (i.e. half of max_failures calls have failed
                # to do anything) we switch to randomly jumping around. If we
                # find a success then we'll resume deterministic order from
                # there which, with any luck, is in a new good region.
                if not self.step(sp, random_order=failures >= max_failures // 2):
                    # step returns False when there is nothing to do because
                    # the entire choice tree is exhausted. If this happens
                    # we break because we literally can't run this pass any
                    # more than we already have until something else makes
                    # progress.
                    break
                any_ran = True

                # Don't count steps that didn't actually try to do
                # anything as failures. Otherwise, this call is a failure
                # if it failed to make any changes to the shrink target.
                if initial_calls != self.calls:
                    if prev is not self.shrink_target:
                        failures = 0
                    else:
                        max_calls_per_failing_step = max(
                            max_calls_per_failing_step, self.calls - initial_calls
                        )
                        failures += 1

            # We reorder the shrink passes so that on our next run through
            # we try good ones first. The rule is that shrink passes that
            # did nothing useful are the worst, shrink passes that reduced
            # the length are the best.
            if self.shrink_target is before_sp:
                reordering[sp] = 1
            elif len(self.choices) < len(before_sp.choices):
                reordering[sp] = -1
            else:
                reordering[sp] = 0

        passes.sort(key=reordering.__getitem__)
@property
def nodes(self) -> tuple[ChoiceNode, ...]:
    """The choice nodes of the current best known example."""
    return self.shrink_target.nodes
@property
def choices(self) -> tuple[ChoiceT, ...]:
    """The choice values of the current best known example."""
    return self.shrink_target.choices
@property
def spans(self) -> Spans:
    """The spans of the current best known example."""
    return self.shrink_target.spans
@derived_value  # type: ignore
def spans_by_label(self):
    """
    A mapping of labels to a list of spans with that label. Spans in each
    list appear in their normal index order.
    """
    grouped = {}
    for span in self.spans:
        grouped.setdefault(span.label, []).append(span)
    return grouped
@derived_value  # type: ignore
def distinct_labels(self):
    """All span labels, in a stable order (sorted by their ``str`` form)."""
    return sorted(self.spans_by_label.keys(), key=str)
def pass_to_descendant(self, chooser):
    """Attempt to replace each span with a descendant span.

    This is designed to deal with strategies that call themselves
    recursively. For example, suppose we had:

    binary_tree = st.deferred(
        lambda: st.one_of(
            st.integers(), st.tuples(binary_tree, binary_tree)))

    This pass guarantees that we can replace any binary tree with one of
    its subtrees - each of those will create an interval that the parent
    could validly be replaced with, and this pass will try doing that.

    This is pretty expensive - it takes O(len(intervals)^2) - so we run it
    late in the process when we've got the number of intervals as far down
    as possible.
    """
    # Only labels that occur at least twice can have an ancestor/descendant
    # pair with that label.
    label = chooser.choose(
        self.distinct_labels, lambda l: len(self.spans_by_label[l]) >= 2
    )

    spans = self.spans_by_label[label]
    i = chooser.choose(range(len(spans) - 1))
    ancestor = spans[i]

    # Fast reject: if the next same-label span starts at or after the
    # ancestor's end, nothing with this label is nested inside it.
    if i + 1 == len(spans) or spans[i + 1].start >= ancestor.end:
        return

    @self.cached(label, i)
    def descendants():
        # Binary search for the first span starting at/after ancestor.end;
        # spans between i+1 and that point lie inside the ancestor.
        lo = i + 1
        hi = len(spans)
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if spans[mid].start >= ancestor.end:
                hi = mid
            else:
                lo = mid
        return [
            span
            for span in spans[i + 1 : hi]
            if span.choice_count < ancestor.choice_count
        ]

    descendant = chooser.choose(descendants, lambda ex: ex.choice_count > 0)

    assert ancestor.start <= descendant.start
    assert ancestor.end >= descendant.end
    assert descendant.choice_count < ancestor.choice_count

    # Replace the ancestor's nodes with just the descendant's nodes.
    self.consider_new_nodes(
        self.nodes[: ancestor.start]
        + self.nodes[descendant.start : descendant.end]
        + self.nodes[ancestor.end :]
    )
    def lower_common_node_offset(self):
        """Sometimes we find ourselves in a situation where changes to one part
        of the choice sequence unlock changes to other parts. Sometimes this is
        good, but sometimes this can cause us to exhibit exponential slow
        downs!

        e.g. suppose we had the following:

        m = draw(integers(min_value=0))
        n = draw(integers(min_value=0))
        assert abs(m - n) > 1

        If this fails then we'll end up with a loop where on each iteration we
        reduce each of m and n by 2 - m can't go lower because of n, then n
        can't go lower because of m.

        This will take us O(m) iterations to complete, which is exponential in
        the data size, as we gradually zig zag our way towards zero.

        This can only happen if we're failing to reduce the size of the choice
        sequence: The number of iterations that reduce the length of the choice
        sequence is bounded by that length.

        So what we do is this: We keep track of which nodes are changing, and
        then if there's some non-zero common offset to them we try and minimize
        them all at once by lowering that offset.

        This may not work, and it definitely won't get us out of all possible
        exponential slow downs (an example of where it doesn't is where the
        shape of the nodes changes as a result of this bouncing behaviour),
        but it fails fast when it doesn't work and gets us out of a really
        nastily slow case when it does.
        """
        if len(self.__changed_nodes) <= 1:
            return

        # Only non-trivial integer nodes can meaningfully share an offset.
        changed = []
        for i in sorted(self.__changed_nodes):
            node = self.nodes[i]
            if node.trivial or node.type != "integer":
                continue
            changed.append(node)

        if not changed:
            return

        # Distance of each changed node's value from its shrink_towards.
        ints = [
            abs(node.value - node.constraints["shrink_towards"]) for node in changed
        ]
        offset = min(ints)
        assert offset > 0

        # Express each distance as (common offset) + (per-node residual).
        for i in range(len(ints)):
            ints[i] -= offset

        st = self.shrink_target

        def offset_node(node, n):
            # Replacement triple (start, end, new_nodes) for replace_all.
            return (
                node.index,
                node.index + 1,
                [node.copy(with_value=node.constraints["shrink_towards"] + n)],
            )

        def consider(n, sign):
            return self.consider_new_nodes(
                replace_all(
                    st.nodes,
                    [
                        offset_node(node, sign * (n + v))
                        for node, v in zip(changed, ints, strict=False)
                    ],
                )
            )

        # shrink from both sides
        Integer.shrink(offset, lambda n: consider(n, 1))
        Integer.shrink(offset, lambda n: consider(n, -1))
        self.clear_change_tracking()
    def clear_change_tracking(self):
        # Reset the diff baseline to the current shrink target so that
        # __changed_nodes starts accumulating afresh from here.
        self.__last_checked_changed_at = self.shrink_target
        self.__all_changed_nodes = set()
    def mark_changed(self, i):
        # Explicitly record that the node at index ``i`` has changed.
        self.__changed_nodes.add(i)
    @property
    def __changed_nodes(self) -> set[int]:
        """Indices of nodes whose value differs between the last-checked
        target and the current shrink target, computed lazily.

        NOTE: the baseline (__last_checked_changed_at) is only advanced by
        clear_change_tracking(), so changes accumulate into
        __all_changed_nodes until that is called.
        """
        # Fast path: nothing new since the last diff.
        if self.__last_checked_changed_at is self.shrink_target:
            return self.__all_changed_nodes

        prev_target = self.__last_checked_changed_at
        new_target = self.shrink_target
        assert prev_target is not new_target
        prev_nodes = prev_target.nodes
        new_nodes = new_target.nodes
        # The shrink target must only ever get smaller in the shrink order.
        assert sort_key(new_target.nodes) < sort_key(prev_target.nodes)

        if len(prev_nodes) != len(new_nodes) or any(
            n1.type != n2.type for n1, n2 in zip(prev_nodes, new_nodes, strict=True)
        ):
            # should we check constraints are equal as well?
            self.__all_changed_nodes = set()
        else:
            assert len(prev_nodes) == len(new_nodes)
            for i, (n1, n2) in enumerate(zip(prev_nodes, new_nodes, strict=True)):
                assert n1.type == n2.type
                if not choice_equal(n1.value, n2.value):
                    self.__all_changed_nodes.add(i)

        return self.__all_changed_nodes
    def update_shrink_target(self, new_target):
        """Record ``new_target`` as the new best shrink target."""
        assert isinstance(new_target, ConjectureResult)
        self.shrinks += 1
        # If we are just taking a long time to shrink we don't want to
        # trigger this heuristic, so whenever we shrink successfully
        # we give ourselves a bit of breathing room to make sure we
        # would find a shrink that took that long to find the next time.
        # The case where we're taking a long time but making steady
        # progress is handled by `finish_shrinking_deadline` in engine.py
        self.max_stall = max(
            self.max_stall, (self.calls - self.calls_at_last_shrink) * 2
        )
        self.calls_at_last_shrink = self.calls
        self.shrink_target = new_target
        # Drop cached @derived_value results (presumably derived from the old
        # shrink target - confirm against the derived_value decorator).
        self.__derived_values = {}
    def try_shrinking_nodes(self, nodes, n):
        """Attempts to replace each node in the nodes list with n. Returns
        True if it succeeded (which may include some additional modifications
        to shrink_target).

        In current usage it is expected that each of the nodes currently have
        the same value and choice_type, although this is not essential. Note that
        n must be < the node at min(nodes) or this is not a valid shrink.

        This method will attempt to do some small amount of work to delete data
        that occurs after the end of the nodes. This is useful for cases where
        there is some size dependency on the value of a node.
        """
        # If the length of the shrink target has changed from under us such that
        # the indices are out of bounds, give up on the replacement.
        # TODO_BETTER_SHRINK: we probably want to narrow down the root cause here at some point.
        if any(node.index >= len(self.nodes) for node in nodes):
            # NOTE(review): bare ``return`` yields None (falsy) where the other
            # early exits return False explicitly - harmless for truthiness,
            # but inconsistent.
            return  # pragma: no cover

        initial_attempt = replace_all(
            self.nodes,
            [(node.index, node.index + 1, [node.copy(with_value=n)]) for node in nodes],
        )

        attempt = self.cached_test_function(initial_attempt)[1]

        if attempt is None:
            return False

        if attempt is self.shrink_target:
            # if the initial shrink was a success, try lowering offsets.
            self.lower_common_node_offset()
            return True

        # If this produced something completely invalid we ditch it
        # here rather than trying to persevere.
        if attempt.status is Status.OVERRUN:
            return False

        if attempt.status is Status.INVALID:
            return False

        if attempt.misaligned_at is not None:
            # we're invalid due to a misalignment in the tree. We'll try to fix
            # a very specific type of misalignment here: where we have a node of
            # {"size": n} and tried to draw the same node, but with {"size": m < n}.
            # This can occur with eg
            #
            # n = data.draw_integer()
            # s = data.draw_string(min_size=n)
            #
            # where we try lowering n, resulting in the test_function drawing a lower
            # min_size than our attempt had for the draw_string node.
            #
            # We'll now try realigning this tree by:
            # * replacing the constraints in our attempt with what test_function tried
            #   to draw in practice
            # * truncating the value of that node to match min_size
            #
            # This helps in the specific case of drawing a value and then drawing
            # a collection of that size...and not much else. In practice this
            # helps because this antipattern is fairly common.

            # TODO we'll probably want to apply the same trick as in the valid
            # case of this function of preserving from the right instead of
            # preserving from the left. see test_can_shrink_variable_string_draws.

            (index, attempt_choice_type, attempt_constraints, _attempt_forced) = (
                attempt.misaligned_at
            )
            node = self.nodes[index]
            if node.type != attempt_choice_type:
                return False  # pragma: no cover
            if node.was_forced:
                return False  # pragma: no cover

            if node.type in {"string", "bytes"}:
                # if the size *increased*, we would have to guess what to pad with
                # in order to try fixing up this attempt. Just give up.
                if node.constraints["min_size"] <= attempt_constraints["min_size"]:
                    # attempts which increase min_size tend to overrun rather than
                    # be misaligned, making a covering case difficult.
                    return False  # pragma: no cover
                # the size decreased in our attempt. Try again, but truncate the value
                # to that size by removing any elements past min_size.
                return self.consider_new_nodes(
                    initial_attempt[: node.index]
                    + [
                        initial_attempt[node.index].copy(
                            with_constraints=attempt_constraints,
                            with_value=initial_attempt[node.index].value[
                                : attempt_constraints["min_size"]
                            ],
                        )
                    ]
                    # NOTE(review): this suffix starts at node.index, so the
                    # original (untruncated) node is retained immediately after
                    # its truncated copy. It looks like it should be
                    # node.index + 1 - confirm against upstream before changing.
                    + initial_attempt[node.index :]
                )

        lost_nodes = len(self.nodes) - len(attempt.nodes)
        if lost_nodes <= 0:
            return False

        start = nodes[0].index
        end = nodes[-1].index + 1
        # We now look for contiguous regions to delete that might help fix up
        # this failed shrink. We only look for contiguous regions of the right
        # lengths because doing anything more than that starts to get very
        # expensive. See minimize_individual_choices for where we
        # try to be more aggressive.
        regions_to_delete = {(end, end + lost_nodes)}

        for ex in self.spans:
            # Only consider spans that strictly enclose the replaced region.
            if ex.start > start:
                continue
            if ex.end <= end:
                continue

            if ex.index >= len(attempt.spans):
                continue  # pragma: no cover

            replacement = attempt.spans[ex.index]

            in_original = [c for c in ex.children if c.start >= end]
            in_replaced = [c for c in replacement.children if c.start >= end]

            if len(in_replaced) >= len(in_original) or not in_replaced:
                continue

            # We've found a span where some of the children went missing
            # as a result of this change, and just replacing it with the data
            # it would have had and removing the spillover didn't work. This
            # means that some of its children towards the right must be
            # important, so we try to arrange it so that it retains its
            # rightmost children instead of its leftmost.
            regions_to_delete.add(
                (in_original[0].start, in_original[-len(in_replaced)].start)
            )

        # Try larger deletions first - they shrink the input the most.
        for u, v in sorted(regions_to_delete, key=lambda x: x[1] - x[0], reverse=True):
            try_with_deleted = initial_attempt[:u] + initial_attempt[v:]
            if self.consider_new_nodes(try_with_deleted):
                return True
        return False
    def remove_discarded(self):
        """Try removing all bytes marked as discarded.

        This is primarily to deal with data that has been ignored while
        doing rejection sampling - e.g. as a result of an integer range, or a
        filtered strategy.

        Such data will also be handled by the adaptive_example_deletion pass,
        but that pass is necessarily more conservative and will try deleting
        each interval individually. The common case is that all data drawn and
        rejected can just be thrown away immediately in one block, so this pass
        will be much faster than trying each one individually when it works.

        returns False if there is discarded data and removing it does not work,
        otherwise returns True.
        """
        while self.shrink_target.has_discards:
            # Collect a maximal left-to-right set of non-overlapping,
            # non-empty discarded spans.
            discarded = []

            for ex in self.shrink_target.spans:
                if (
                    ex.choice_count > 0
                    and ex.discarded
                    and (not discarded or ex.start >= discarded[-1][-1])
                ):
                    discarded.append((ex.start, ex.end))

            # This can happen if we have discards but they are all of
            # zero length. This shouldn't happen very often so it's
            # faster to check for it here than at the point of example
            # generation.
            if not discarded:
                break

            attempt = list(self.nodes)
            # Delete right-to-left so earlier indices stay valid.
            for u, v in reversed(discarded):
                del attempt[u:v]

            if not self.consider_new_nodes(tuple(attempt)):
                return False
        return True
@derived_value # type: ignore
def duplicated_nodes(self):
"""Returns a list of nodes grouped (choice_type, value)."""
duplicates = defaultdict(list)
for node in self.nodes:
duplicates[(node.type, choice_key(node.value))].append(node)
return list(duplicates.values())
    def node_program(self, program: str) -> ShrinkPass:
        # Wrap the node-program mini-DSL (see run_node_program) as a named
        # ShrinkPass that picks its start index via the chooser.
        return ShrinkPass(
            lambda chooser: self._node_program(chooser, program),
            name=f"node_program_{program}",
        )
    def _node_program(self, chooser, program):
        """Run ``program`` at a chooser-selected index, then adaptively extend
        its region of application leftwards and by repetition."""
        n = len(program)
        # Adaptively attempt to run the node program at the current
        # index. If this successfully applies the node program ``k`` times
        # then this runs in ``O(log(k))`` test function calls.
        i = chooser.choose(range(len(self.nodes) - n + 1))

        # First, run the node program at the chosen index. If this fails,
        # don't do any extra work, so that failure is as cheap as possible.
        if not self.run_node_program(i, program, original=self.shrink_target):
            return

        # Because we run in a random order we will often find ourselves in the middle
        # of a region where we could run the node program. We thus start by moving
        # left to the beginning of that region if possible in order to start from
        # the beginning of that region.
        def offset_left(k):
            return i - k * n

        i = offset_left(
            find_integer(
                lambda k: self.run_node_program(
                    offset_left(k), program, original=self.shrink_target
                )
            )
        )

        original = self.shrink_target
        # Now try to run the node program multiple times here.
        find_integer(
            lambda k: self.run_node_program(i, program, original=original, repeats=k)
        )
    def minimize_duplicated_choices(self, chooser):
        """Find choices that have been duplicated in multiple places and attempt
        to minimize all of the duplicates simultaneously.

        This lets us handle cases where two values can't be shrunk
        independently of each other but can easily be shrunk together.
        For example if we had something like:

        ls = data.draw(lists(integers()))
        y = data.draw(integers())
        assert y not in ls

        Suppose we drew y = 3 and after shrinking we have ls = [3]. If we were
        to replace both 3s with 0, this would be a valid shrink, but if we were
        to replace either 3 with 0 on its own the test would start passing.

        It is also useful for when that duplication is accidental and the value
        of the choices don't matter very much because it allows us to replace
        more values at once.
        """
        nodes = chooser.choose(self.duplicated_nodes)
        # we can't lower any nodes which are trivial. try proceeding with the
        # remaining nodes.
        nodes = [node for node in nodes if not node.trivial]
        # minimize_nodes needs at least two nodes to be worthwhile here.
        if len(nodes) <= 1:
            return

        self.minimize_nodes(nodes)
    def redistribute_numeric_pairs(self, chooser):
        """Lower one number while raising another.

        If the sum of two generated numbers must exceed some bound, lowering
        one of them requires raising the other. This pass enables that.
        """
        # look for a pair of nodes (node1, node2) which are both numeric
        # and aren't separated by too many other nodes. We'll decrease node1 and
        # increase node2 (note that the other way around doesn't make sense as
        # it's strictly worse in the ordering).
        def can_choose_node(node):
            # don't choose nan, inf, or floats above the threshold where f + 1 > f
            # (which is not necessarily true for floats above MAX_PRECISE_INTEGER).
            # The motivation for the last condition is to avoid trying weird
            # non-shrinks where we raise one node and think we lowered another
            # (but didn't).
            return node.type in {"integer", "float"} and not (
                node.type == "float"
                and (math.isnan(node.value) or abs(node.value) >= MAX_PRECISE_INTEGER)
            )

        node1 = chooser.choose(
            self.nodes,
            lambda node: can_choose_node(node) and not node.trivial,
        )
        node2 = chooser.choose(
            self.nodes,
            lambda node: can_choose_node(node)
            # Note that it's fine for node2 to be trivial, because we're going to
            # explicitly make it *not* trivial by adding to its value.
            and not node.was_forced
            # to avoid quadratic behavior, scan ahead only a small amount for
            # the related node.
            and node1.index < node.index <= node1.index + 4,
        )

        m: int | float = node1.value
        n: int | float = node2.value

        def boost(k: int) -> bool:
            # floats always shrink towards 0
            shrink_towards = (
                node1.constraints["shrink_towards"] if node1.type == "integer" else 0
            )
            if k > abs(m - shrink_towards):
                return False

            # We are trying to move node1 (m) closer to shrink_towards, and node2
            # (n) farther away from shrink_towards. If m is below shrink_towards,
            # we want to add to m and subtract from n, and vice versa if above
            # shrink_towards.
            if m < shrink_towards:
                k = -k

            try:
                v1 = m - k
                v2 = n + k
            except OverflowError:  # pragma: no cover
                # if n or m is a float and k is over sys.float_info.max, coercing
                # k to a float will overflow.
                return False

            # if we've increased node2 to the point that we're past max precision,
            # give up - things have become too unstable.
            if node1.type == "float" and abs(v2) >= MAX_PRECISE_INTEGER:
                return False

            return self.consider_new_nodes(
                self.nodes[: node1.index]
                + (node1.copy(with_value=v1),)
                + self.nodes[node1.index + 1 : node2.index]
                + (node2.copy(with_value=v2),)
                + self.nodes[node2.index + 1 :]
            )

        find_integer(boost)
    def lower_integers_together(self, chooser):
        """Lower two nearby integer nodes by the same amount simultaneously.

        Useful when two draws must keep a fixed relationship (e.g. stay equal)
        for the failure to reproduce.
        """
        node1 = chooser.choose(
            self.nodes, lambda n: n.type == "integer" and not n.trivial
        )
        # Search up to 3 nodes ahead, to avoid quadratic time.
        node2 = self.nodes[
            chooser.choose(
                range(node1.index + 1, min(len(self.nodes), node1.index + 3 + 1)),
                lambda i: self.nodes[i].type == "integer"
                and not self.nodes[i].was_forced,
            )
        ]

        # one might expect us to require node2 to be nontrivial, and to minimize
        # the node which is closer to its shrink_towards, rather than node1
        # unconditionally. In reality, it's acceptable for us to transition node2
        # from trivial to nontrivial, because the shrink ordering is dominated by
        # the complexity of the earlier node1. What matters is minimizing node1.
        shrink_towards = node1.constraints["shrink_towards"]

        def consider(n):
            # Lower both nodes by the same delta ``n``.
            return self.consider_new_nodes(
                self.nodes[: node1.index]
                + (node1.copy(with_value=node1.value - n),)
                + self.nodes[node1.index + 1 : node2.index]
                + (node2.copy(with_value=node2.value - n),)
                + self.nodes[node2.index + 1 :]
            )

        find_integer(lambda n: consider(shrink_towards - n))
        find_integer(lambda n: consider(n - shrink_towards))
    def lower_duplicated_characters(self, chooser):
        """
        Select two string choices no more than 4 choices apart and simultaneously
        lower characters which appear in both strings. This helps cases where the
        same character must appear in two strings, but the actual value of the
        character is not relevant.

        This shrinking pass currently only tries lowering *all* instances of the
        duplicated character in both strings. So for instance, given two choices:

            "bbac"
            "abbb"

        we would try lowering all five of the b characters simultaneously. This
        may fail to shrink some cases where only certain character indices are
        correlated, for instance if only the b at index 1 could be lowered
        simultaneously and the rest did in fact actually have to be a `b`.

        It would be nice to try shrinking that case as well, but we would need good
        safeguards because it could get very expensive to try all combinations.
        I expect lowering all duplicates to handle most cases in the meantime.
        """
        node1 = chooser.choose(
            self.nodes, lambda n: n.type == "string" and not n.trivial
        )

        # limit search to up to 4 choices ahead, to avoid quadratic behavior
        node2 = self.nodes[
            chooser.choose(
                range(node1.index + 1, min(len(self.nodes), node1.index + 1 + 4)),
                lambda i: self.nodes[i].type == "string" and not self.nodes[i].trivial
                # select nodes which have at least one of the same character present
                and set(node1.value) & set(self.nodes[i].value),
            )
        ]

        duplicated_characters = set(node1.value) & set(node2.value)
        # deterministic ordering
        char = chooser.choose(sorted(duplicated_characters))
        intervals = node1.constraints["intervals"]

        def copy_node(node, n):
            # replace all duplicate characters in each string. This might miss
            # some shrinks compared to only replacing some, but trying all possible
            # combinations of indices could get expensive if done without some
            # thought.
            return node.copy(
                with_value=node.value.replace(char, intervals.char_in_shrink_order(n))
            )

        Integer.shrink(
            intervals.index_from_char_in_shrink_order(char),
            lambda n: self.consider_new_nodes(
                self.nodes[: node1.index]
                + (copy_node(node1, n),)
                + self.nodes[node1.index + 1 : node2.index]
                + (copy_node(node2, n),)
                + self.nodes[node2.index + 1 :]
            ),
        )
    def minimize_nodes(self, nodes):
        """Shrink all of ``nodes`` - which share a choice type and value -
        simultaneously, dispatching on the choice type.
        """
        choice_type = nodes[0].type
        value = nodes[0].value
        # unlike choice_type and value, constraints are *not* guaranteed to be equal among all
        # passed nodes. We arbitrarily use the constraints of the first node. I think
        # this is unsound (= leads to us trying shrinks that could not have been
        # generated), but those get discarded at test-time, and this enables useful
        # slips where constraints are not equal but are close enough that doing the
        # same operation on both basically just works.
        constraints = nodes[0].constraints
        assert all(
            node.type == choice_type and choice_equal(node.value, value)
            for node in nodes
        )

        if choice_type == "integer":
            shrink_towards = constraints["shrink_towards"]
            # try shrinking from both sides towards shrink_towards.
            # we're starting from n = abs(shrink_towards - value). Because the
            # shrinker will not check its starting value, we need to try
            # shrinking to n first.
            self.try_shrinking_nodes(nodes, abs(shrink_towards - value))
            Integer.shrink(
                abs(shrink_towards - value),
                lambda n: self.try_shrinking_nodes(nodes, shrink_towards + n),
            )
            Integer.shrink(
                abs(shrink_towards - value),
                lambda n: self.try_shrinking_nodes(nodes, shrink_towards - n),
            )
        elif choice_type == "float":
            # Same idea as for integers, but floats shrink towards 0.
            self.try_shrinking_nodes(nodes, abs(value))
            Float.shrink(
                abs(value),
                lambda val: self.try_shrinking_nodes(nodes, val),
            )
            Float.shrink(
                abs(value),
                lambda val: self.try_shrinking_nodes(nodes, -val),
            )
        elif choice_type == "boolean":
            # must be True, otherwise would be trivial and not selected.
            assert value is True
            # only one thing to try: false!
            self.try_shrinking_nodes(nodes, False)
        elif choice_type == "bytes":
            Bytes.shrink(
                value,
                lambda val: self.try_shrinking_nodes(nodes, val),
                min_size=constraints["min_size"],
            )
        elif choice_type == "string":
            String.shrink(
                value,
                lambda val: self.try_shrinking_nodes(nodes, val),
                intervals=constraints["intervals"],
                min_size=constraints["min_size"],
            )
        else:
            raise NotImplementedError
    def try_trivial_spans(self, chooser):
        """Try replacing a chooser-selected span with its trivial form: every
        non-forced node in it is set to the first choice in the shrink order
        (choice_from_index(0, ...))."""
        i = chooser.choose(range(len(self.spans)))

        prev = self.shrink_target
        nodes = self.shrink_target.nodes
        span = self.spans[i]
        prefix = nodes[: span.start]
        replacement = tuple(
            [
                (
                    node
                    if node.was_forced
                    else node.copy(
                        with_value=choice_from_index(0, node.type, node.constraints)
                    )
                )
                for node in nodes[span.start : span.end]
            ]
        )
        suffix = nodes[span.end :]
        attempt = self.cached_test_function(prefix + replacement + suffix)[1]

        # The direct attempt already became the new shrink target; done.
        if self.shrink_target is not prev:
            return

        if isinstance(attempt, ConjectureResult):
            # The direct replacement failed; retry using the span contents the
            # attempt actually produced (realigned to the attempt's spans).
            new_span = attempt.spans[i]
            new_replacement = attempt.nodes[new_span.start : new_span.end]
            self.consider_new_nodes(prefix + new_replacement + suffix)
    def minimize_individual_choices(self, chooser):
        """Attempt to minimize each choice in sequence.

        This is the pass that ensures that e.g. each integer we draw is a
        minimum value. So it's the part that guarantees that if we e.g. do

        x = data.draw(integers())
        assert x < 10

        then in our shrunk example, x = 10 rather than say 97.

        If we are unsuccessful at minimizing a choice of interest we then
        check if that's because it's changing the size of the test case and,
        if so, we also make an attempt to delete parts of the test case to
        see if that fixes it.

        We handle most of the common cases in try_shrinking_nodes which is
        pretty good at clearing out large contiguous blocks of dead space,
        but it fails when there is data that has to stay in particular places
        in the list.
        """
        node = chooser.choose(self.nodes, lambda node: not node.trivial)
        initial_target = self.shrink_target

        self.minimize_nodes([node])
        if self.shrink_target is not initial_target:
            # the shrink target changed, so our shrink worked. Defer doing
            # anything more intelligent until this shrink fails.
            return

        # the shrink failed. One particularly common case where minimizing a
        # node can fail is the antipattern of drawing a size and then drawing a
        # collection of that size, or more generally when there is a size
        # dependency on some single node. We'll explicitly try and fix up this
        # common case here: if decreasing an integer node by one would reduce
        # the size of the generated input, we'll try deleting things after that
        # node and see if the resulting attempt works.

        if node.type != "integer":
            # Only try this fixup logic on integer draws. Almost all size
            # dependencies are on integer draws, and if it's not, it's doing
            # something convoluted enough that it is unlikely to shrink well anyway.
            # TODO: extent to floats? we probably currently fail on the following,
            # albeit convoluted example:
            #
            # n = int(data.draw(st.floats()))
            # s = data.draw(st.lists(st.integers(), min_size=n, max_size=n))
            return

        lowered = (
            self.nodes[: node.index]
            + (node.copy(with_value=node.value - 1),)
            + self.nodes[node.index + 1 :]
        )
        attempt = self.cached_test_function(lowered)[1]
        if (
            attempt is None
            or attempt.status < Status.VALID
            or len(attempt.nodes) == len(self.nodes)
            or len(attempt.nodes) == node.index + 1
        ):
            # no point in trying our size-dependency-logic if our attempt at
            # lowering the node resulted in:
            # * an invalid conjecture data
            # * the same number of nodes as before
            # * no nodes beyond the lowered node (nothing to try to delete afterwards)
            return

        # If it were then the original shrink should have worked and we could
        # never have got here.
        assert attempt is not self.shrink_target

        @self.cached(node.index)
        def first_span_after_node():
            # Binary search for the first span starting at or after the node.
            lo = 0
            hi = len(self.spans)
            while lo + 1 < hi:
                mid = (lo + hi) // 2
                span = self.spans[mid]
                if span.start >= node.index:
                    hi = mid
                else:
                    lo = mid
            return hi

        # we try deleting both entire spans, and single nodes.
        # If we wanted to get more aggressive, we could try deleting n
        # consecutive nodes (that don't cross a span boundary) for say
        # n <= 2 or n <= 3.
        if chooser.choose([True, False]):
            span = self.spans[
                chooser.choose(
                    range(first_span_after_node, len(self.spans)),
                    lambda i: self.spans[i].choice_count > 0,
                )
            ]
            self.consider_new_nodes(lowered[: span.start] + lowered[span.end :])
        else:
            node = self.nodes[chooser.choose(range(node.index + 1, len(self.nodes)))]
            self.consider_new_nodes(lowered[: node.index] + lowered[node.index + 1 :])
    def reorder_spans(self, chooser):
        """This pass allows us to reorder the children of each span.

        For example, consider the following:

        .. code-block:: python

            import hypothesis.strategies as st
            from hypothesis import given


            @given(st.text(), st.text())
            def test_not_equal(x, y):
                assert x != y

        Without the ability to reorder x and y this could fail either with
        ``x=""``, ``y="0"``, or the other way around. With reordering it will
        reliably fail with ``x=""``, ``y="0"``.
        """
        span = chooser.choose(self.spans)
        label = chooser.choose(span.children).label

        # Only same-labelled siblings are interchangeable.
        spans = [c for c in span.children if c.label == label]
        if len(spans) <= 1:
            return
        endpoints = [(span.start, span.end) for span in spans]

        st = self.shrink_target
        # Permute the sibling spans towards sorted order of their contents
        # (smaller contents first under sort_key).
        Ordering.shrink(
            range(len(spans)),
            lambda indices: self.consider_new_nodes(
                replace_all(
                    st.nodes,
                    [
                        (
                            u,
                            v,
                            st.nodes[spans[i].start : spans[i].end],
                        )
                        for (u, v), i in zip(endpoints, indices, strict=True)
                    ],
                )
            ),
            key=lambda i: sort_key(st.nodes[spans[i].start : spans[i].end]),
        )
    def run_node_program(self, i, program, original, repeats=1):
        """Node programs are a mini-DSL for node rewriting, defined as a sequence
        of commands that can be run at some index into the nodes

        Commands are:

            * "X", delete this node

        This method runs the node program in ``program`` at node index
        ``i`` on the ConjectureData ``original``. If ``repeats > 1`` then it
        will attempt to approximate the results of running it that many times.

        Returns True if this successfully changes the underlying shrink target,
        else False.
        """
        if i + len(program) > len(original.nodes) or i < 0:
            return False
        attempt = list(original.nodes)
        for _ in range(repeats):
            # Apply commands right-to-left so deletions don't shift the
            # indices of commands not yet applied.
            for k, command in reversed(list(enumerate(program))):
                j = i + k
                if j >= len(attempt):
                    return False

                if command == "X":
                    del attempt[j]
                else:
                    raise NotImplementedError(f"Unrecognised command {command!r}")

        return self.consider_new_nodes(attempt)
| Shrinker |
python | getsentry__sentry | tests/sentry/auth/test_access.py | {
"start": 43094,
"end": 43709
} | class ____(TestCase):
def test_system_access(self) -> None:
org = self.create_organization()
team = self.create_team(organization=org)
project = self.create_project(teams=[team])
result = access.SystemAccess()
assert not result.sso_is_valid
assert not result.requires_sso
assert result.has_project_access(project)
assert result.has_any_project_scope(project, "project:read")
assert not result.has_team_membership(team)
assert result.has_scope("project:read")
assert result.has_team_access(team)
@no_silo_test
| SystemAccessTest |
python | lepture__authlib | tests/clients/asgi_helper.py | {
"start": 126,
"end": 1101
} | class ____:
def __init__(self, body=b"", status_code=200, headers=None, assert_func=None):
if headers is None:
headers = {}
if isinstance(body, dict):
body = json.dumps(body).encode()
headers["Content-Type"] = "application/json"
else:
if isinstance(body, str):
body = body.encode()
headers["Content-Type"] = "application/x-www-form-urlencoded"
self.body = body
self.status_code = status_code
self.headers = headers
self.assert_func = assert_func
async def __call__(self, scope, receive, send):
request = ASGIRequest(scope, receive=receive)
if self.assert_func:
await self.assert_func(request)
response = ASGIResponse(
status_code=self.status_code,
content=self.body,
headers=self.headers,
)
await response(scope, receive, send)
| AsyncMockDispatch |
python | sqlalchemy__sqlalchemy | examples/dogpile_caching/model.py | {
"start": 1057,
"end": 1464
} | class ____(Base):
__tablename__ = "postal_code"
id = Column(Integer, primary_key=True)
code = Column(String(10), nullable=False)
city_id = Column(Integer, ForeignKey("city.id"), nullable=False)
city = relationship(City)
@property
def country(self):
return self.city.country
def __init__(self, code, city):
self.code = code
self.city = city
| PostalCode |
python | python__mypy | mypy/report.py | {
"start": 4466,
"end": 4702
} | class ____(TraverserVisitor):
def __init__(self) -> None:
super().__init__()
self.counts = [0, 0]
def visit_func_def(self, defn: FuncDef) -> None:
self.counts[defn.type is not None] += 1
| FuncCounterVisitor |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 61364,
"end": 62436
} | class ____(Operation):
def __init__(self, x_min, x_max, *, name=None):
super().__init__(name=name)
self.x_min = x_min
self.x_max = x_max
def call(self, x):
return backend.numpy.clip(x, self.x_min, self.x_max)
def compute_output_spec(self, x):
dtype = backend.standardize_dtype(x.dtype)
if dtype == "bool":
dtype = "int32"
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.clip", "keras.ops.numpy.clip"])
def clip(x, x_min, x_max):
"""Clip (limit) the values in a tensor.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of `[0, 1]` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Args:
x: Input tensor.
x_min: Minimum value.
x_max: Maximum value.
Returns:
The clipped tensor.
"""
if any_symbolic_tensors((x,)):
return Clip(x_min, x_max).symbolic_call(x)
return backend.numpy.clip(x, x_min, x_max)
| Clip |
python | python-jsonschema__jsonschema | jsonschema/tests/test_validators.py | {
"start": 78451,
"end": 87535
} | class ____(TestCase):
base_uri = ""
stored_uri = "foo://stored"
stored_schema = {"stored": "schema"}
def setUp(self):
self.referrer = {}
self.store = {self.stored_uri: self.stored_schema}
self.resolver = validators._RefResolver(
self.base_uri, self.referrer, self.store,
)
def test_it_does_not_retrieve_schema_urls_from_the_network(self):
ref = validators.Draft3Validator.META_SCHEMA["id"]
with mock.patch.object(self.resolver, "resolve_remote") as patched: # noqa: SIM117
with self.resolver.resolving(ref) as resolved:
pass
self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA)
self.assertFalse(patched.called)
def test_it_resolves_local_refs(self):
ref = "#/properties/foo"
self.referrer["properties"] = {"foo": object()}
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, self.referrer["properties"]["foo"])
def test_it_resolves_local_refs_with_id(self):
schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}}
resolver = validators._RefResolver.from_schema(
schema,
id_of=lambda schema: schema.get("id", ""),
)
with resolver.resolving("#/a") as resolved:
self.assertEqual(resolved, schema["a"])
with resolver.resolving("http://bar/schema#/a") as resolved:
self.assertEqual(resolved, schema["a"])
def test_it_retrieves_stored_refs(self):
with self.resolver.resolving(self.stored_uri) as resolved:
self.assertIs(resolved, self.stored_schema)
self.resolver.store["cached_ref"] = {"foo": 12}
with self.resolver.resolving("cached_ref#/foo") as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_requests(self):
ref = "http://bar#baz"
schema = {"baz": 12}
if "requests" in sys.modules: # pragma: no cover
self.addCleanup(
sys.modules.__setitem__, "requests", sys.modules["requests"],
)
sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema})
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, 12)
def test_it_retrieves_unstored_refs_via_urlopen(self):
ref = "http://bar#baz"
schema = {"baz": 12}
if "requests" in sys.modules: # pragma: no cover
self.addCleanup(
sys.modules.__setitem__, "requests", sys.modules["requests"],
)
sys.modules["requests"] = None
@contextmanager
def fake_urlopen(url):
self.assertEqual(url, "http://bar")
yield BytesIO(json.dumps(schema).encode("utf8"))
with mock.patch("urllib.request.urlopen", new=fake_urlopen): # noqa: SIM117
with self.resolver.resolving(ref) as resolved:
pass
self.assertEqual(resolved, 12)
def test_it_retrieves_local_refs_via_urlopen(self):
with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf:
self.addCleanup(os.remove, tempf.name)
json.dump({"foo": "bar"}, tempf)
ref = f"file://{pathname2url(tempf.name)}#foo"
with self.resolver.resolving(ref) as resolved:
self.assertEqual(resolved, "bar")
def test_it_can_construct_a_base_uri_from_a_schema(self):
schema = {"id": "foo"}
resolver = validators._RefResolver.from_schema(
schema,
id_of=lambda schema: schema.get("id", ""),
)
self.assertEqual(resolver.base_uri, "foo")
self.assertEqual(resolver.resolution_scope, "foo")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("foo#") as resolved:
self.assertEqual(resolved, schema)
def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
schema = {}
resolver = validators._RefResolver.from_schema(schema)
self.assertEqual(resolver.base_uri, "")
self.assertEqual(resolver.resolution_scope, "")
with resolver.resolving("") as resolved:
self.assertEqual(resolved, schema)
with resolver.resolving("#") as resolved:
self.assertEqual(resolved, schema)
def test_custom_uri_scheme_handlers(self):
def handler(url):
self.assertEqual(url, ref)
return schema
schema = {"foo": "bar"}
ref = "foo://bar"
resolver = validators._RefResolver("", {}, handlers={"foo": handler})
with resolver.resolving(ref) as resolved:
self.assertEqual(resolved, schema)
def test_cache_remote_on(self):
response = [object()]
def handler(url):
try:
return response.pop()
except IndexError: # pragma: no cover
self.fail("Response must not have been cached!")
ref = "foo://bar"
resolver = validators._RefResolver(
"", {}, cache_remote=True, handlers={"foo": handler},
)
with resolver.resolving(ref):
pass
with resolver.resolving(ref):
pass
def test_cache_remote_off(self):
response = [object()]
def handler(url):
try:
return response.pop()
except IndexError: # pragma: no cover
self.fail("Handler called twice!")
ref = "foo://bar"
resolver = validators._RefResolver(
"", {}, cache_remote=False, handlers={"foo": handler},
)
with resolver.resolving(ref):
pass
def test_if_you_give_it_junk_you_get_a_resolution_error(self):
error = ValueError("Oh no! What's this?")
def handler(url):
raise error
ref = "foo://bar"
resolver = validators._RefResolver("", {}, handlers={"foo": handler})
with self.assertRaises(exceptions._RefResolutionError) as err: # noqa: SIM117
with resolver.resolving(ref):
self.fail("Shouldn't get this far!") # pragma: no cover
self.assertEqual(err.exception, exceptions._RefResolutionError(error))
def test_helpful_error_message_on_failed_pop_scope(self):
resolver = validators._RefResolver("", {})
resolver.pop_scope()
with self.assertRaises(exceptions._RefResolutionError) as exc:
resolver.pop_scope()
self.assertIn("Failed to pop the scope", str(exc.exception))
def test_pointer_within_schema_with_different_id(self):
"""
See #1085.
"""
schema = validators.Draft7Validator.META_SCHEMA
one = validators._RefResolver("", schema)
validator = validators.Draft7Validator(schema, resolver=one)
self.assertFalse(validator.is_valid({"maxLength": "foo"}))
another = {
"allOf": [{"$ref": validators.Draft7Validator.META_SCHEMA["$id"]}],
}
two = validators._RefResolver("", another)
validator = validators.Draft7Validator(another, resolver=two)
self.assertFalse(validator.is_valid({"maxLength": "foo"}))
def test_newly_created_validator_with_ref_resolver(self):
"""
See https://github.com/python-jsonschema/jsonschema/issues/1061#issuecomment-1624266555.
"""
def handle(uri):
self.assertEqual(uri, "http://example.com/foo")
return {"type": "integer"}
resolver = validators._RefResolver("", {}, handlers={"http": handle})
Validator = validators.create(
meta_schema={},
validators=validators.Draft4Validator.VALIDATORS,
)
schema = {"$id": "http://example.com/bar", "$ref": "foo"}
validator = Validator(schema, resolver=resolver)
self.assertEqual(
(validator.is_valid({}), validator.is_valid(37)),
(False, True),
)
def test_refresolver_with_pointer_in_schema_with_no_id(self):
"""
See https://github.com/python-jsonschema/jsonschema/issues/1124#issuecomment-1632574249.
"""
schema = {
"properties": {"x": {"$ref": "#/definitions/x"}},
"definitions": {"x": {"type": "integer"}},
}
validator = validators.Draft202012Validator(
schema,
resolver=validators._RefResolver("", schema),
)
self.assertEqual(
(validator.is_valid({"x": "y"}), validator.is_valid({"x": 37})),
(False, True),
)
def sorted_errors(errors):
def key(error):
return (
[str(e) for e in error.path],
[str(e) for e in error.schema_path],
)
return sorted(errors, key=key)
@define
| TestRefResolver |
python | django__django | django/core/management/commands/squashmigrations.py | {
"start": 492,
"end": 10131
} | class ____(BaseCommand):
help = (
"Squashes an existing set of migrations (from first until specified) into a "
"single new one."
)
def add_arguments(self, parser):
parser.add_argument(
"app_label",
help="App label of the application to squash migrations for.",
)
parser.add_argument(
"start_migration_name",
nargs="?",
help=(
"Migrations will be squashed starting from and including this "
"migration."
),
)
parser.add_argument(
"migration_name",
help="Migrations will be squashed until and including this migration.",
)
parser.add_argument(
"--no-optimize",
action="store_true",
help="Do not try to optimize the squashed operations.",
)
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--squashed-name",
help="Sets the name of the new squashed migration.",
)
parser.add_argument(
"--no-header",
action="store_false",
dest="include_header",
help="Do not add a header comment to the new squashed migration.",
)
def handle(self, **options):
self.verbosity = options["verbosity"]
self.interactive = options["interactive"]
app_label = options["app_label"]
start_migration_name = options["start_migration_name"]
migration_name = options["migration_name"]
no_optimize = options["no_optimize"]
squashed_name = options["squashed_name"]
include_header = options["include_header"]
# Validate app_label.
try:
apps.get_app_config(app_label)
except LookupError as err:
raise CommandError(str(err))
# Load the current graph state, check the app and migration they asked
# for exists.
loader = MigrationLoader(None)
if app_label not in loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (so squashmigrations on "
"it makes no sense)" % app_label
)
migration = self.find_migration(loader, app_label, migration_name)
# Work out the list of predecessor migrations
migrations_to_squash = [
loader.get_migration(al, mn)
for al, mn in loader.graph.forwards_plan(
(migration.app_label, migration.name)
)
if al == migration.app_label
]
if start_migration_name:
start_migration = self.find_migration(
loader, app_label, start_migration_name
)
start = loader.get_migration(
start_migration.app_label, start_migration.name
)
try:
start_index = migrations_to_squash.index(start)
migrations_to_squash = migrations_to_squash[start_index:]
except ValueError:
raise CommandError(
"The migration '%s' cannot be found. Maybe it comes after "
"the migration '%s'?\n"
"Have a look at:\n"
" python manage.py showmigrations %s\n"
"to debug this issue." % (start_migration, migration, app_label)
)
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(
self.style.MIGRATE_HEADING("Will squash the following migrations:")
)
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = input("Do you wish to proceed? [y/N] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together,
# along with collecting external dependencies and detecting
# double-squashing
operations = []
dependencies = set()
# We need to take all dependencies from the first migration in the list
# as it may be 0002 depending on 0001
first_migration = True
for smigration in migrations_to_squash:
operations.extend(smigration.operations)
for dependency in smigration.dependencies:
if isinstance(dependency, SwappableTuple):
if settings.AUTH_USER_MODEL == dependency.setting:
dependencies.add(("__setting__", "AUTH_USER_MODEL"))
else:
dependencies.add(dependency)
elif dependency[0] != smigration.app_label or first_migration:
dependencies.add(dependency)
first_migration = False
if no_optimize:
if self.verbosity > 0:
self.stdout.write(
self.style.MIGRATE_HEADING("(Skipping optimization.)")
)
new_operations = operations
else:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(
" Optimized from %s operations to %s operations."
% (len(operations), len(new_operations))
)
replaces = [(m.app_label, m.name) for m in migrations_to_squash]
# Make a new migration with those operations
subclass = type(
"Migration",
(migrations.Migration,),
{
"dependencies": dependencies,
"operations": new_operations,
"replaces": replaces,
},
)
if start_migration_name:
if squashed_name:
# Use the name from --squashed-name.
prefix, _ = start_migration.name.split("_", 1)
name = "%s_%s" % (prefix, squashed_name)
else:
# Generate a name.
name = "%s_squashed_%s" % (start_migration.name, migration.name)
new_migration = subclass(name, app_label)
else:
name = "0001_%s" % (squashed_name or "squashed_%s" % migration.name)
new_migration = subclass(name, app_label)
new_migration.initial = True
# Write out the new migration file
writer = MigrationWriter(new_migration, include_header)
if os.path.exists(writer.path):
raise CommandError(
f"Migration {new_migration.name} already exists. Use a different name."
)
with open(writer.path, "w", encoding="utf-8") as fh:
fh.write(writer.as_string())
run_formatters([writer.path], stderr=self.stderr)
if self.verbosity > 0:
self.stdout.write(
self.style.MIGRATE_HEADING(
"Created new squashed migration %s" % writer.path
)
+ "\n"
" You should commit this migration but leave the old ones in place;\n"
" the new migration will be used for new installs. Once you are sure\n"
" all instances of the codebase have applied the migrations you "
"squashed,\n"
" you can delete them."
)
if writer.needs_manual_porting:
self.stdout.write(
self.style.MIGRATE_HEADING("Manual porting required") + "\n"
" Your migrations contained functions that must be manually "
"copied over,\n"
" as we could not safely copy their implementation.\n"
" See the comment at the top of the squashed migration for "
"details."
)
if shutil.which("black"):
self.stdout.write(
self.style.WARNING(
"Squashed migration couldn't be formatted using the "
'"black" command. You can call it manually.'
)
)
def find_migration(self, loader, app_label, name):
try:
return loader.get_migration_by_prefix(app_label, name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. Please be "
"more specific." % (name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'."
% (name, app_label)
)
| Command |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 20078,
"end": 21209
} | class ____(themeable):
"""
Plot title
Parameters
----------
theme_element : element_text
Notes
-----
The default horizontal alignment for the title is center. However the
title will be left aligned if and only if there is a subtitle and its
horizontal alignment has not been set (so it defaults to the left).
The defaults ensure that, short titles are not awkwardly left-aligned,
and that a title and a subtitle will not be awkwardly mis-aligned in
the center or with different alignments.
"""
_omit = ["margin"]
def apply_figure(self, figure: Figure, targets: ThemeTargets):
super().apply_figure(figure, targets)
if text := targets.plot_title:
props = self.properties
# ha can be a float and is handled by the layout manager
with suppress(KeyError):
del props["ha"]
text.set(**props)
def blank_figure(self, figure: Figure, targets: ThemeTargets):
super().blank_figure(figure, targets)
if text := targets.plot_title:
text.set_visible(False)
| plot_title |
python | django__django | django/http/multipartparser.py | {
"start": 818,
"end": 1044
} | class ____(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
FIELD_TYPES = frozenset([FIELD, RAW])
MAX_TOTAL_HEADER_SIZE = 1024
| InputStreamExhausted |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/8_Actor_Critic_Advantage/AC_continue_Pendulum.py | {
"start": 3073,
"end": 6388
} | class ____(object):
def __init__(self, sess, n_features, lr=0.01):
self.sess = sess
with tf.name_scope('inputs'):
self.s = tf.placeholder(tf.float32, [1, n_features], "state")
self.v_ = tf.placeholder(tf.float32, [1, 1], name="v_next")
self.r = tf.placeholder(tf.float32, name='r')
with tf.variable_scope('Critic'):
l1 = tf.layers.dense(
inputs=self.s,
units=30, # number of hidden units
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(0., .1), # weights
bias_initializer=tf.constant_initializer(0.1), # biases
name='l1'
)
self.v = tf.layers.dense(
inputs=l1,
units=1, # output units
activation=None,
kernel_initializer=tf.random_normal_initializer(0., .1), # weights
bias_initializer=tf.constant_initializer(0.1), # biases
name='V'
)
with tf.variable_scope('squared_TD_error'):
self.td_error = tf.reduce_mean(self.r + GAMMA * self.v_ - self.v)
self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval
with tf.variable_scope('train'):
self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
def learn(self, s, r, s_):
s, s_ = s[np.newaxis, :], s_[np.newaxis, :]
v_ = self.sess.run(self.v, {self.s: s_})
td_error, _ = self.sess.run([self.td_error, self.train_op],
{self.s: s, self.v_: v_, self.r: r})
return td_error
OUTPUT_GRAPH = False
MAX_EPISODE = 1000
MAX_EP_STEPS = 200
DISPLAY_REWARD_THRESHOLD = -100 # renders environment if total episode reward is greater then this threshold
RENDER = False # rendering wastes time
GAMMA = 0.9
LR_A = 0.001 # learning rate for actor
LR_C = 0.01 # learning rate for critic
env = gym.make('Pendulum-v0')
env.seed(1) # reproducible
env = env.unwrapped
N_S = env.observation_space.shape[0]
A_BOUND = env.action_space.high
sess = tf.Session()
actor = Actor(sess, n_features=N_S, lr=LR_A, action_bound=[-A_BOUND, A_BOUND])
critic = Critic(sess, n_features=N_S, lr=LR_C)
sess.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
tf.summary.FileWriter("logs/", sess.graph)
for i_episode in range(MAX_EPISODE):
s = env.reset()
t = 0
ep_rs = []
while True:
# if RENDER:
env.render()
a = actor.choose_action(s)
s_, r, done, info = env.step(a)
r /= 10
td_error = critic.learn(s, r, s_) # gradient = grad[r + gamma * V(s_) - V(s)]
actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]
s = s_
t += 1
ep_rs.append(r)
if t > MAX_EP_STEPS:
ep_rs_sum = sum(ep_rs)
if 'running_reward' not in globals():
running_reward = ep_rs_sum
else:
running_reward = running_reward * 0.9 + ep_rs_sum * 0.1
if running_reward > DISPLAY_REWARD_THRESHOLD: RENDER = True # rendering
print("episode:", i_episode, " reward:", int(running_reward))
break
| Critic |
python | doocs__leetcode | solution/0900-0999/0930.Binary Subarrays With Sum/Solution2.py | {
"start": 0,
"end": 479
} | class ____:
def numSubarraysWithSum(self, nums: List[int], goal: int) -> int:
i1 = i2 = s1 = s2 = j = ans = 0
n = len(nums)
while j < n:
s1 += nums[j]
s2 += nums[j]
while i1 <= j and s1 > goal:
s1 -= nums[i1]
i1 += 1
while i2 <= j and s2 >= goal:
s2 -= nums[i2]
i2 += 1
ans += i2 - i1
j += 1
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/models/ui/icons.py | {
"start": 2518,
"end": 3545
} | class ____(Icon):
""" Built-in icons included with BokehJS. """
# explicit __init__ to support Init signatures
def __init__(self, icon_name: Init[str] = Intrinsic, **kwargs: Any) -> None:
super().__init__(icon_name=icon_name, **kwargs)
icon_name = Required(Either(Enum(ToolIcon), String), help="""
The name of a built-in icon to use. Currently, the following icon names are
supported: ``"help"``, ``"question-mark"``, ``"settings"``, ``"x"``
.. bokeh-plot::
:source-position: none
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import BuiltinIcon, Button
builtin_icons = ["help", "question-mark", "settings", "x"]
icon_demo = []
for icon in builtin_icons:
icon_demo.append(Button(label=icon, button_type="light", icon=BuiltinIcon(icon, size="1.2em")))
show(column(icon_demo))
""")
color = Color(default="gray", help="""
Color to use for the icon.
""")
| BuiltinIcon |
python | encode__django-rest-framework | tests/test_viewsets.py | {
"start": 3026,
"end": 3102
} | class ____:
def __init__(self):
self.mapping = {}
| ThingWithMapping |
python | huggingface__transformers | src/transformers/models/whisper/modeling_whisper.py | {
"start": 8315,
"end": 9628
} | class ____(nn.Embedding):
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__(num_positions, embedding_dim)
def forward(self, input_ids, past_key_values_length=0, position_ids=None):
if position_ids is None:
return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[1]]
else:
return self.weight[position_ids]
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs,
):
if scaling is None:
scaling = query.size(-1) ** -0.5
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None and attention_mask.ndim == 4:
attn_weights = attn_weights + attention_mask[:, :, :, : key.shape[-2]]
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| WhisperPositionalEmbedding |
python | PrefectHQ__prefect | tests/server/schemas/test_schedules.py | {
"start": 25445,
"end": 30070
} | class ____:
async def test_rrule_schedule_hourly_daylight_savings_time_forward_with_UTC(
self,
):
"""
On 3/11/2018, at 2am, America/New_York switched clocks forward an hour.
"""
dt = datetime(2018, 3, 11, 4, tzinfo=ZoneInfo("UTC"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.HOURLY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
assert dates[0].tzname() == "UTC"
# skip 2am
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
23,
0,
1,
3,
4,
]
# constant hourly clock in utc time
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 5, 6, 7, 8]
async def test_rrule_schedule_hourly_daylight_savings_time_forward(self):
"""
On 3/11/2018, at 2am, America/New_York switched clocks forward an hour.
"""
dt = datetime(2018, 3, 10, 23, tzinfo=ZoneInfo("America/New_York"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.HOURLY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
# skip 2am
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
23,
0,
1,
3,
4,
]
# constant hourly clock in utc time
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 5, 6, 7, 8]
async def test_rrule_schedule_hourly_daylight_savings_time_backward(self):
"""
11/4/2018, at 2am, America/New_York switched clocks back an hour.
"""
dt = datetime(2018, 11, 3, 23, tzinfo=ZoneInfo("America/New_York"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.HOURLY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
23,
0,
1,
2,
3,
]
# skips an hour UTC - note rrule clocks skip the "6"
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [3, 4, 5, 7, 8]
async def test_rrule_schedule_daily_start_daylight_savings_time_forward(self):
"""
On 3/11/2018, at 2am, America/New_York switched clocks forward an hour.
Confirm that a clock for 9am America/New_York stays 9am through the switch.
"""
dt = datetime(2018, 3, 8, 9, tzinfo=ZoneInfo("America/New_York"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.DAILY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
# constant 9am start
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
9,
9,
9,
9,
9,
]
# utc time shifts
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [
14,
14,
14,
13,
13,
]
async def test_rrule_schedule_daily_start_daylight_savings_time_backward(self):
"""
On 11/4/2018, at 2am, America/New_York switched clocks back an hour.
Confirm that a clock for 9am America/New_York stays 9am through the switch.
"""
dt = datetime(2018, 11, 1, 9, tzinfo=ZoneInfo("America/New_York"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.DAILY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
# constant 9am start
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
9,
9,
9,
9,
9,
]
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [
13,
13,
13,
14,
14,
]
async def test_rrule_schedule_daily_start_daylight_savings_time_backward_utc(self):
"""
On 11/4/2018, at 2am, America/New_York switched clocks back an hour.
Confirm that a clock for 9am UTC stays 9am through the switch.
"""
dt = datetime(2018, 11, 1, 9, tzinfo=ZoneInfo("UTC"))
s = RRuleSchedule.from_rrule(rrule.rrule(rrule.DAILY, dtstart=dt))
dates = await s.get_dates(n=5, start=dt)
# constant 9am start
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
5,
5,
5,
4,
4,
]
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [9, 9, 9, 9, 9]
| TestRRuleScheduleDaylightSavingsTime |
python | scrapy__scrapy | scrapy/spiders/feed.py | {
"start": 4315,
"end": 6413
} | class ____(Spider):
"""Spider for parsing CSV feeds.
It receives a CSV file in a response; iterates through each of its rows,
and calls parse_row with a dict containing each field's data.
You can set some options regarding the CSV file, such as the delimiter, quotechar
and the file's headers.
"""
delimiter: str | None = (
None # When this is None, python's csv module's default delimiter is used
)
quotechar: str | None = (
None # When this is None, python's csv module's default quotechar is used
)
headers: list[str] | None = None
def process_results(
self, response: Response, results: Iterable[Any]
) -> Iterable[Any]:
"""This method has the same purpose as the one in XMLFeedSpider"""
return results
def adapt_response(self, response: Response) -> Response:
"""This method has the same purpose as the one in XMLFeedSpider"""
return response
def parse_row(self, response: Response, row: dict[str, str]) -> Any:
"""This method must be overridden with your custom spider functionality"""
raise NotImplementedError
def parse_rows(self, response: Response) -> Any:
"""Receives a response and a dict (representing each row) with a key for
each provided (or detected) header of the CSV file. This spider also
gives the opportunity to override adapt_response and
process_results methods for pre and post-processing purposes.
"""
for row in csviter(
response, self.delimiter, self.headers, quotechar=self.quotechar
):
ret = iterate_spider_output(self.parse_row(response, row))
yield from self.process_results(response, ret)
def _parse(self, response: Response, **kwargs: Any) -> Any:
if not hasattr(self, "parse_row"):
raise NotConfigured(
"You must define parse_row method in order to scrape this CSV feed"
)
response = self.adapt_response(response)
return self.parse_rows(response)
| CSVFeedSpider |
python | great-expectations__great_expectations | great_expectations/core/configuration.py | {
"start": 1237,
"end": 1568
} | class ____(Schema):
REMOVE_KEYS_IF_NONE = ["id", "name"]
@post_dump
def filter_none(self, data: dict, **kwargs) -> dict:
return {
key: value
for key, value in data.items()
if key not in AbstractConfigSchema.REMOVE_KEYS_IF_NONE or value is not None
}
| AbstractConfigSchema |
python | django-haystack__django-haystack | test_haystack/test_query.py | {
"start": 3464,
"end": 14312
} | class ____(TestCase):
fixtures = ["base_data.json", "bulk_data.json"]
@classmethod
def setUpClass(cls):
for connection in connections.all():
connection.get_unified_index().reset()
super().setUpClass()
def setUp(self):
super().setUp()
self.bsq = BaseSearchQuery()
def test_get_count(self):
self.bsq.add_filter(SQ(foo="bar"))
self.assertRaises(NotImplementedError, self.bsq.get_count)
def test_build_query(self):
self.bsq.add_filter(SQ(foo="bar"))
self.assertRaises(NotImplementedError, self.bsq.build_query)
def test_add_filter(self):
self.assertEqual(len(self.bsq.query_filter), 0)
self.bsq.add_filter(SQ(foo="bar"))
self.assertEqual(len(self.bsq.query_filter), 1)
self.bsq.add_filter(SQ(foo__lt="10"))
self.bsq.add_filter(~SQ(claris="moof"))
self.bsq.add_filter(SQ(claris="moof"), use_or=True)
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: OR ((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof)>",
)
self.bsq.add_filter(SQ(claris="moof"))
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof)>",
)
self.bsq.add_filter(SQ(claris="wtf mate"))
self.assertEqual(
repr(self.bsq.query_filter),
"<SQ: AND (((foo__content=bar AND foo__lt=10 AND NOT (claris__content=moof)) OR claris__content=moof) AND claris__content=moof AND claris__content=wtf mate)>",
)
def test_add_order_by(self):
self.assertEqual(len(self.bsq.order_by), 0)
self.bsq.add_order_by("foo")
self.assertEqual(len(self.bsq.order_by), 1)
def test_clear_order_by(self):
self.bsq.add_order_by("foo")
self.assertEqual(len(self.bsq.order_by), 1)
self.bsq.clear_order_by()
self.assertEqual(len(self.bsq.order_by), 0)
def test_add_model(self):
self.assertEqual(len(self.bsq.models), 0)
self.assertRaises(AttributeError, self.bsq.add_model, object)
self.assertEqual(len(self.bsq.models), 0)
self.bsq.add_model(MockModel)
self.assertEqual(len(self.bsq.models), 1)
self.bsq.add_model(AnotherMockModel)
self.assertEqual(len(self.bsq.models), 2)
def test_set_limits(self):
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
def test_clear_limits(self):
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
self.bsq.clear_limits()
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
def test_add_boost(self):
self.assertEqual(self.bsq.boost, {})
self.bsq.add_boost("foo", 10)
self.assertEqual(self.bsq.boost, {"foo": 10})
def test_add_highlight(self):
self.assertEqual(self.bsq.highlight, False)
self.bsq.add_highlight()
self.assertEqual(self.bsq.highlight, True)
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
msq = MockSearchQuery()
msq.backend = MockSearchBackend("mlt")
ui = connections["default"].get_unified_index()
bmmsi = BasicMockModelSearchIndex()
ui.build(indexes=[bmmsi])
bmmsi.update()
msq.more_like_this(mock)
self.assertEqual(msq.get_count(), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
def test_add_field_facet(self):
self.bsq.add_field_facet("foo")
self.assertEqual(self.bsq.facets, {"foo": {}})
self.bsq.add_field_facet("bar")
self.assertEqual(self.bsq.facets, {"foo": {}, "bar": {}})
def test_add_date_facet(self):
self.bsq.add_date_facet(
"foo",
start_date=datetime.date(2009, 2, 25),
end_date=datetime.date(2009, 3, 25),
gap_by="day",
)
self.assertEqual(
self.bsq.date_facets,
{
"foo": {
"gap_by": "day",
"start_date": datetime.date(2009, 2, 25),
"end_date": datetime.date(2009, 3, 25),
"gap_amount": 1,
}
},
)
self.bsq.add_date_facet(
"bar",
start_date=datetime.date(2008, 1, 1),
end_date=datetime.date(2009, 12, 1),
gap_by="month",
)
self.assertEqual(
self.bsq.date_facets,
{
"foo": {
"gap_by": "day",
"start_date": datetime.date(2009, 2, 25),
"end_date": datetime.date(2009, 3, 25),
"gap_amount": 1,
},
"bar": {
"gap_by": "month",
"start_date": datetime.date(2008, 1, 1),
"end_date": datetime.date(2009, 12, 1),
"gap_amount": 1,
},
},
)
def test_add_query_facet(self):
self.bsq.add_query_facet("foo", "bar")
self.assertEqual(self.bsq.query_facets, [("foo", "bar")])
self.bsq.add_query_facet("moof", "baz")
self.assertEqual(self.bsq.query_facets, [("foo", "bar"), ("moof", "baz")])
self.bsq.add_query_facet("foo", "baz")
self.assertEqual(
self.bsq.query_facets, [("foo", "bar"), ("moof", "baz"), ("foo", "baz")]
)
def test_add_stats(self):
self.bsq.add_stats_query("foo", ["bar"])
self.assertEqual(self.bsq.stats, {"foo": ["bar"]})
self.bsq.add_stats_query("moof", ["bar", "baz"])
self.assertEqual(self.bsq.stats, {"foo": ["bar"], "moof": ["bar", "baz"]})
def test_add_narrow_query(self):
self.bsq.add_narrow_query("foo:bar")
self.assertEqual(self.bsq.narrow_queries, set(["foo:bar"]))
self.bsq.add_narrow_query("moof:baz")
self.assertEqual(self.bsq.narrow_queries, set(["foo:bar", "moof:baz"]))
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
# Custom class.
class IttyBittyResult:
pass
self.bsq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))
# Reset to default.
self.bsq.set_result_class(None)
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
def test_run(self):
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
msq = connections["default"].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
# Restore.
connections["default"]._index = self.old_unified_index
def test_clone(self):
self.bsq.add_filter(SQ(foo="bar"))
self.bsq.add_filter(SQ(foo__lt="10"))
self.bsq.add_filter(~SQ(claris="moof"))
self.bsq.add_filter(SQ(claris="moof"), use_or=True)
self.bsq.add_order_by("foo")
self.bsq.add_model(MockModel)
self.bsq.add_boost("foo", 2)
self.bsq.add_highlight()
self.bsq.add_field_facet("foo")
self.bsq.add_date_facet(
"foo",
start_date=datetime.date(2009, 1, 1),
end_date=datetime.date(2009, 1, 31),
gap_by="day",
)
self.bsq.add_query_facet("foo", "bar")
self.bsq.add_stats_query("foo", "bar")
self.bsq.add_narrow_query("foo:bar")
clone = self.bsq._clone()
self.assertTrue(isinstance(clone, BaseSearchQuery))
self.assertEqual(len(clone.query_filter), 2)
self.assertEqual(len(clone.order_by), 1)
self.assertEqual(len(clone.models), 1)
self.assertEqual(len(clone.boost), 1)
self.assertEqual(clone.highlight, True)
self.assertEqual(len(clone.facets), 1)
self.assertEqual(len(clone.date_facets), 1)
self.assertEqual(len(clone.query_facets), 1)
self.assertEqual(len(clone.narrow_queries), 1)
self.assertEqual(clone.start_offset, self.bsq.start_offset)
self.assertEqual(clone.end_offset, self.bsq.end_offset)
self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections["default"].queries), 0)
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
self.bmmsi.update()
with self.settings(DEBUG=False):
msq = connections["default"].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(len(connections["default"].queries), 0)
with self.settings(DEBUG=True):
# Redefine it to clear out the cached results.
msq2 = connections["default"].get_query()
self.assertEqual(len(msq2.get_results()), 23)
self.assertEqual(len(connections["default"].queries), 1)
self.assertEqual(connections["default"].queries[0]["query_string"], "")
msq3 = connections["default"].get_query()
msq3.add_filter(SQ(foo="bar"))
len(msq3.get_results())
self.assertEqual(len(connections["default"].queries), 2)
self.assertEqual(connections["default"].queries[0]["query_string"], "")
self.assertEqual(connections["default"].queries[1]["query_string"], "")
# Restore.
connections["default"]._index = self.old_unified_index
| BaseSearchQueryTestCase |
python | boto__boto3 | tests/unit/dynamodb/test_transform.py | {
"start": 952,
"end": 2541
} | class ____(unittest.TestCase):
def setUp(self):
self.target_shape = 'MyShape'
self.original_value = 'orginal'
self.transformed_value = 'transformed'
self.transformer = ParameterTransformer()
self.json_model = {}
self.nested_json_model = {}
self.setup_models()
self.build_models()
def setup_models(self):
self.json_model = {
'operations': {
'SampleOperation': {
'name': 'SampleOperation',
'input': {'shape': 'SampleOperationInputOutput'},
'output': {'shape': 'SampleOperationInputOutput'},
}
},
'shapes': {
'SampleOperationInputOutput': {
'type': 'structure',
'members': {},
},
'String': {'type': 'string'},
},
}
def build_models(self):
self.service_model = ServiceModel(self.json_model)
self.operation_model = OperationModel(
self.json_model['operations']['SampleOperation'],
self.service_model,
)
def add_input_shape(self, shape):
self.add_shape(shape)
params_shape = self.json_model['shapes']['SampleOperationInputOutput']
shape_name = list(shape.keys())[0]
params_shape['members'][shape_name] = {'shape': shape_name}
def add_shape(self, shape):
shape_name = list(shape.keys())[0]
self.json_model['shapes'][shape_name] = shape[shape_name]
| BaseTransformationTest |
python | django__django | tests/admin_views/admin.py | {
"start": 14552,
"end": 15025
} | class ____(admin.ModelAdmin):
list_display = ["title", "slug"]
prepopulated_fields = {"slug": ("title",)}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ("slug",)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
| PrePopulatedPostAdmin |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 4079,
"end": 4875
} | class ____:
"""
Django REST Framework settings for APIv3.
Override global DRF settings for APIv3 in particular. All ViewSet should
inherit from this class to share/apply the same settings all over the APIv3.
.. note::
The only settings used from ``settings.REST_FRAMEWORK`` is
``DEFAULT_THROTTLE_RATES`` since it's not possible to define here.
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
pagination_class = LimitOffsetPagination
LimitOffsetPagination.default_limit = 10
renderer_classes = (AlphabeticalSortedJSONRenderer, BrowsableAPIRenderer)
throttle_classes = (UserRateThrottle, AnonRateThrottle)
filter_backends = (filters.DjangoFilterBackend,)
metadata_class = SimpleMetadata
| APIv3Settings |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-scrapegraph/llama_index/tools/scrapegraph/base.py | {
"start": 258,
"end": 5750
} | class ____(BaseToolSpec):
"""
ScrapeGraph tool specification for web scraping operations.
This tool provides access to ScrapeGraph AI's web scraping capabilities,
including smart scraping, content conversion to markdown, search functionality,
and basic HTML scraping with various options.
"""
spec_functions = [
"scrapegraph_smartscraper",
"scrapegraph_markdownify",
"scrapegraph_search",
"scrapegraph_scrape",
"scrapegraph_agentic_scraper",
]
def __init__(self, api_key: Optional[str] = None) -> None:
"""
Initialize the ScrapeGraph tool specification.
Args:
api_key (Optional[str]): ScrapeGraph API key. If not provided,
will attempt to load from environment variable SGAI_API_KEY.
"""
if api_key:
self.client = Client(api_key=api_key)
else:
self.client = Client.from_env()
def scrapegraph_smartscraper(
self,
prompt: str,
url: str,
schema: Optional[Union[List[BaseModel], Dict[str, Any]]] = None,
**kwargs,
) -> Union[List[Dict], Dict]:
"""
Perform intelligent web scraping using ScrapeGraph's SmartScraper.
Args:
prompt (str): User prompt describing what data to extract from the webpage
url (str): Target website URL to scrape
schema (Optional[Union[List[BaseModel], Dict]]): Pydantic models or dict defining output structure
**kwargs: Additional parameters for the SmartScraper
Returns:
Union[List[Dict], Dict]: Scraped data matching the provided schema or prompt requirements
"""
try:
return self.client.smartscraper(
website_url=url, user_prompt=prompt, output_schema=schema, **kwargs
)
except Exception as e:
return {"error": f"SmartScraper failed: {e!s}"}
def scrapegraph_markdownify(self, url: str, **kwargs) -> str:
"""
Convert webpage content to markdown format using ScrapeGraph.
Args:
url (str): Target website URL to convert to markdown
**kwargs: Additional parameters for the markdownify operation
Returns:
str: Markdown representation of the webpage content
"""
try:
return self.client.markdownify(website_url=url, **kwargs)
except Exception as e:
return f"Markdownify failed: {e!s}"
def scrapegraph_search(
self, query: str, max_results: Optional[int] = None, **kwargs
) -> str:
"""
Perform a search query using ScrapeGraph's search functionality.
Args:
query (str): Search query to execute
max_results (Optional[int]): Maximum number of search results to return
**kwargs: Additional parameters for the search operation
Returns:
str: Search results from ScrapeGraph
"""
try:
search_params = {"query": query}
if max_results:
search_params["max_results"] = max_results
search_params.update(kwargs)
return self.client.search(**search_params)
except Exception as e:
return f"Search failed: {e!s}"
def scrapegraph_scrape(
self,
url: str,
render_heavy_js: bool = False,
headers: Optional[Dict[str, str]] = None,
**kwargs,
) -> Dict[str, Any]:
"""
Perform basic HTML scraping using ScrapeGraph's scrape functionality.
Args:
url (str): Target website URL to scrape
render_heavy_js (bool): Whether to enable JavaScript rendering for dynamic content
headers (Optional[Dict[str, str]]): Custom HTTP headers to include in the request
**kwargs: Additional parameters for the scrape operation
Returns:
Dict[str, Any]: Dictionary containing scraped HTML content and metadata
"""
try:
scrape_params = {"website_url": url, "render_heavy_js": render_heavy_js}
if headers:
scrape_params["headers"] = headers
scrape_params.update(kwargs)
return self.client.scrape(**scrape_params)
except Exception as e:
return {"error": f"Scrape failed: {e!s}"}
def scrapegraph_agentic_scraper(
self,
prompt: str,
url: str,
schema: Optional[Union[List[BaseModel], Dict[str, Any]]] = None,
**kwargs,
) -> Union[List[Dict], Dict]:
"""
Perform agentic web scraping that can navigate and interact with websites.
Args:
prompt (str): User prompt describing the scraping task and navigation requirements
url (str): Target website URL to start scraping from
schema (Optional[Union[List[BaseModel], Dict]]): Pydantic models or dict defining output structure
**kwargs: Additional parameters for the agentic scraper
Returns:
Union[List[Dict], Dict]: Scraped data from the agentic navigation and extraction
"""
try:
return self.client.agentic_scraper(
website_url=url, user_prompt=prompt, output_schema=schema, **kwargs
)
except Exception as e:
return {"error": f"Agentic scraper failed: {e!s}"}
| ScrapegraphToolSpec |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.