language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PrefectHQ__prefect | tests/utilities/test_collections.py | {
"start": 466,
"end": 518
} | class ____(BaseAnnotation):
pass
| ExampleAnnotation |
python | huggingface__transformers | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | {
"start": 44469,
"end": 48449
} | class ____(DeepseekVLProcessor):
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
images: Optional[ImageInput] = None,
**kwargs: Unpack[DeepseekVLHybridProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
DeepseekVLHybridImageProcessor's [`~DeepseekVLHybridImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring
of the above two methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
DeepseekVLHybridProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs
)
if text is None and images is None:
raise ValueError("You must specify either text or images.")
if text is not None:
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
prompt_strings = []
one_img_tokens = self.image_token * self.num_image_tokens
for prompt in text:
prompt = prompt.replace(self.image_token, one_img_tokens)
prompt_strings.append(prompt)
data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
# process images if pixel_values are provided
if images is not None:
inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
data["pixel_values"] = inputs["pixel_values"]
data["high_res_pixel_values"] = inputs["high_res_pixel_values"]
return BatchFeature(data=data)
__all__ = [
"DeepseekVLHybridConfig",
"DeepseekVLHybridPreTrainedModel",
"DeepseekVLHybridModel",
"DeepseekVLHybridForConditionalGeneration",
"DeepseekVLHybridImageProcessor",
"DeepseekVLHybridImageProcessorFast",
"DeepseekVLHybridProcessor",
]
| DeepseekVLHybridProcessor |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 44687,
"end": 46078
} | class ____(ColumnConstraint):
"""A column constraint that ensures all values in a pandas column are less than the provided
upper bound [inclusive].
Args:
max_value (Union[int, float, datetime.datetime]): The upper bound.
ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.
"""
def __init__(self, max_value, ignore_missing_vals):
self.max_value = check.inst_param(max_value, "max_value", (int, float, datetime))
self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")
super().__init__(
markdown_description=f"values < {self.max_value}",
error_description=f"Column must have values < {self.max_value}",
)
def validate(self, dataframe, column_name):
invalid = dataframe[column_name] > self.max_value
if self.ignore_missing_vals:
invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])
out_of_bounds_rows = dataframe[invalid]
if not out_of_bounds_rows.empty:
raise ColumnConstraintViolationException(
constraint_name=self.name,
constraint_description=self.error_description,
column_name=column_name,
offending_rows=out_of_bounds_rows,
)
| MaxValueColumnConstraint |
python | vyperlang__vyper | vyper/semantics/environment.py | {
"start": 771,
"end": 853
} | class ____(_EnvType):
_id = "chain"
_type_members = {"id": UINT256_T}
| _Chain |
python | lepture__mistune | src/mistune/directives/admonition.py | {
"start": 236,
"end": 2207
} | class ____(DirectivePlugin):
SUPPORTED_NAMES = {
"attention",
"caution",
"danger",
"error",
"hint",
"important",
"note",
"tip",
"warning",
}
def parse(self, block: "BlockParser", m: Match[str], state: "BlockState") -> Dict[str, Any]:
name = self.parse_type(m)
attrs = {"name": name}
options = dict(self.parse_options(m))
if "class" in options:
attrs["class"] = options["class"]
title = self.parse_title(m)
if not title:
title = name.capitalize()
content = self.parse_content(m)
children = [
{
"type": "admonition_title",
"text": title,
},
{
"type": "admonition_content",
"children": self.parse_tokens(block, content, state),
},
]
return {
"type": "admonition",
"children": children,
"attrs": attrs,
}
def __call__(self, directive: "BaseDirective", md: "Markdown") -> None:
for name in self.SUPPORTED_NAMES:
directive.register(name, self.parse)
assert md.renderer is not None
if md.renderer.NAME == "html":
md.renderer.register("admonition", render_admonition)
md.renderer.register("admonition_title", render_admonition_title)
md.renderer.register("admonition_content", render_admonition_content)
def render_admonition(self: Any, text: str, name: str, **attrs: Any) -> str:
html = '<section class="admonition ' + name
_cls = attrs.get("class")
if _cls:
html += " " + _cls
return html + '">\n' + text + "</section>\n"
def render_admonition_title(self: Any, text: str) -> str:
return '<p class="admonition-title">' + text + "</p>\n"
def render_admonition_content(self: Any, text: str) -> str:
return text
| Admonition |
python | getsentry__sentry | src/sentry/data_secrecy/cache.py | {
"start": 148,
"end": 1273
} | class ____:
@staticmethod
def get(organization_id: int) -> EffectiveGrantStatus:
"""
Retrieve cached grant status for an organization.
"""
cache_key = CACHE_KEY_PATTERN.format(organization_id=organization_id)
cached_data = cache.get(cache_key)
return EffectiveGrantStatus.from_cache(cached_data)
@staticmethod
def set(
organization_id: int, grant_status: EffectiveGrantStatus, current_time: datetime
) -> None:
"""
Set the cached grant status for an organization.
"""
cache_key = CACHE_KEY_PATTERN.format(organization_id=organization_id)
cache.set(
cache_key,
grant_status,
timeout=grant_status.cache_ttl(current_time),
)
@staticmethod
def delete(organization_id: int) -> None:
"""
Delete the cached grant status for an organization.
"""
cache_key = CACHE_KEY_PATTERN.format(organization_id=organization_id)
cache.delete(cache_key)
effective_grant_status_cache = EffectiveGrantStatusCache()
| EffectiveGrantStatusCache |
python | google__pytype | pytype/pytd/parse/node_test.py | {
"start": 1973,
"end": 2288
} | class ____(visitors.Visitor):
"""A visitor that skips XY.y subtrees."""
def EnterXY(self, _):
return {"y"}
def VisitData(self, data):
"""Visit Data nodes, and zero all data."""
return data.Replace(d1=0, d2=0, d3=0)
# We want to test == and != so:
# pylint: disable=g-generic-assert
| SkipNodeVisitor |
python | keras-team__keras | keras/src/optimizers/__init__.py | {
"start": 3476,
"end": 3974
} | class ____:
def __init__(self, *args, **kwargs):
raise ImportError(
"`keras.optimizers.legacy` is not supported in Keras 3. When using "
"`tf.keras`, to continue using a `tf.keras.optimizers.legacy` "
"optimizer, you can install the `tf_keras` package (Keras 2) and "
"set the environment variable `TF_USE_LEGACY_KERAS=True` to "
"configure TensorFlow to use `tf_keras` when accessing `tf.keras`."
)
| LegacyOptimizerWarning |
python | astropy__astropy | astropy/units/tests/test_quantity.py | {
"start": 66606,
"end": 66822
} | class ____(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
| QuantityMimic2 |
python | joke2k__faker | faker/providers/person/bg_BG/__init__.py | {
"start": 44,
"end": 32408
} | class ____(PersonProvider):
first_names_female = (
"Авгия",
"Авигея",
"Авторка",
"Аглая",
"Аглоида",
"Агнешка",
"Адамина",
"Адра",
"Адрианиа",
"Аела",
"Айрен",
"Аксентия",
"Алания",
"Албина",
"Александрина",
"Алексиа",
"Аленка",
"Алиана",
"Алисия",
"Алтая",
"Амбър",
"Амория",
"АнаМария",
"Анатолия",
"Ангелка",
"Андика",
"Андриана",
"Анелина",
"анета",
"Анза",
"Анимира",
"Аница",
"Аномалия",
"Антоалина",
"Антонела",
"Ануша",
"Анхея-мей",
"Аполинария",
"Аралия",
"Аркадия",
"Арсена",
"Аселина",
"Асифа",
"Астромерия",
"Атина",
"Аурора",
"Багра",
"Балина",
"Бацислава",
"Беатрис",
"Бела",
"Белисима",
"Беломира",
"Бенелена",
"Берислава",
"Бея",
"Билена",
"Бисера",
"Биянка",
"Благодатка",
"Благосвета",
"Богдалина",
"Богородка",
"Бодурка",
"Божидара-силвестра",
"Божинела",
"Божурка",
"Бонифация",
"Борена",
"Бориска",
"Борянка",
"Боца",
"Бригита",
"Бронислава",
"Буна",
"Буча",
"Бързана",
"Ваклина",
"Валерия",
"Валя",
"Вангелия",
"Ванухи",
"Варта",
"Васенка",
"Василина",
"Васка",
"Вашиля",
"Вежда",
"Велиана",
"Велинна",
"Велиянка",
"Венда",
"Венеция",
"Венислава",
"Венчислава",
"Верена",
"Верислава",
"Веса",
"Веселинка",
"Весна",
"Веца",
"Видима",
"Виктория",
"Вилия",
"Вилхема",
"Виолета",
"Виржиния",
"Витомира",
"Вишка",
"Владилена",
"Владлена",
"Водица",
"Войнка",
"Вула",
"Възкресения",
"Въльо",
"Върбунка",
"Въца",
"Габи",
"Галена",
"Галина",
"Галя",
"Гануца",
"Гвоздейка",
"Гена",
"Георгелена",
"Георгица",
"Герга",
"Гердана",
"Гертруда",
"Гиздана",
"Гичка",
"Гордана",
"Гория",
"Гоца",
"Графица",
"Грета",
"Гримяна",
"Гроздинка",
"Гуна",
"Гъда",
"Гълъбица",
"Гюгра",
"Гюргя",
"Дакота",
"Дамяна",
"Данелина",
"Данимира",
"Данка",
"Дарданела",
"Дария",
"Дафинка",
"Деа",
"Деви",
"Делиана",
"Деля",
"Демирела",
"Дениандра",
"Дениславена",
"Денница",
"Десимиляна",
"Десияна",
"Дефлорина",
"Дея",
"Джана",
"Джиневра",
"Джулия",
"Диана-Мария",
"Дида",
"Дилмана",
"Димитра",
"Димка",
"Динна",
"Добрина",
"Дойка",
"Доменика",
"Доника",
"Дора-Анна",
"Дорина",
"Доста",
"Доча",
"Драгица",
"Дренка",
"Дуда",
"Душка",
"Дюкяна",
"Евангелина",
"Евдокия",
"Евридика",
"Едита",
"Ел",
"Елдора",
"Еленица",
"Елеонета",
"Ели",
"Елиз",
"Елина",
"Елиса",
"Елица",
"Елма",
"Елфида",
"Емануила",
"Емма",
"Еница",
"Ергана",
"Ермиля",
"Естела",
"Ефимена",
"Ефросиния",
"Жаклин",
"Жанин",
"Жара",
"Жейна",
"Желязка",
"Женимира",
"Жива",
"Живомира",
"Жичка",
"Жорка",
"Жулиана",
"Заека",
"Занка",
"Зафа",
"Захаринка",
"Звездемира",
"Здравелина",
"Здухостина",
"Зинаида",
"Златея",
"Златка",
"Златомира",
"Зоичка",
"Зорка",
"Зузичка",
"Ивалена",
"ивамина",
"Иванеса",
"Иваничка",
"Ивелиана",
"Ивинка",
"Иглика",
"Изидора",
"Илеана",
"Илианна",
"Илинда",
"Илка",
"Инан",
"Инеса",
"Ира",
"Ирин",
"Ирла",
"Исихия",
"Истилияна",
"Йоана",
"Йоанна",
"Йованка",
"Йоко",
"Йолина",
"Йона",
"Йоника",
"Йорданка",
"Йоханна",
"Кадифейка",
"Калея",
"Калина",
"Калиса",
"Калуда",
"Камея",
"Кануша",
"Карамелита",
"Карина",
"Касиди",
"Катастрофа",
"Катинка",
"Каунка",
"Кветослава",
"Керанка",
"Кети",
"Кино",
"Кирка",
"Китчица",
"Клара",
"Клеуна",
"Клоя",
"Кокимира",
"Комара",
"Константина",
"Корнелия",
"Костадинка",
"Кралина",
"Красидара",
"Красияна",
"Криси",
"кристабела",
"Кристиана",
"Кристия",
"Кръстанка",
"Ксандриния",
"Кунка",
"Кьнина",
"Лада",
"Лазура",
"Лалка",
"Лариса",
"Лаца",
"Лека",
"Ленче",
"Летисия",
"Либерта",
"Лидийка",
"Лика",
"Лилия",
"Лилянка",
"Линда",
"Лия",
"Лозанка",
"Лорена",
"Лоти",
"Луна",
"Лъчезарка",
"Любина",
"Люблина",
"Любослава",
"Люляна",
"Люсила",
"Лянка",
"Магдалена",
"Мадлен",
"Майя",
"Максимилияна",
"Малена",
"Малтина",
"Манолина",
"Мара-антоанета",
"Маргит",
"Марен",
"Мари-анри",
"Марийка",
"Маринета",
"Мариотка",
"Мария",
"Мария-елена",
"Мария-Хуана",
"Марлена",
"Маруся",
"Маса",
"Матка",
"Маша",
"Медиха",
"Мелания",
"Мелъди",
"Меропа",
"Миглена",
"Мила",
"Милара",
"милдия",
"Милиана",
"Милост",
"Мимоза",
"Минка",
"Миранза",
"Мирена",
"Миропа",
"Мисла",
"Митошка",
"Михайлена",
"Мишка",
"Младлена",
"Момера",
"Моника",
"Мортадела",
"Мушана",
"Наводненка",
"Надка",
"Найда",
"Нани",
"Настия",
"Наташа",
"Невена",
"Негрита",
"Неделяна",
"Нейка",
"Нелида",
"Нелла",
"Неолина",
"Нешка",
"Нигрита",
"Никоела",
"Николина",
"Нионила",
"Нона",
"Норка",
"Нурета",
"Огнена",
"Октавия",
"Оливера",
"Омана",
"Орлеана",
"Орхидея",
"Павилия",
"Павлина",
"Палвира",
"Паломина",
"Панда",
"Пантера",
"Парашкевица",
"Парунка",
"Патриотка",
"Паулина",
"Паца",
"Пейолина",
"Пелина",
"Пепелота",
"Периана",
"перуна",
"Петинка",
"Петрийка",
"Петромира",
"Петрушка",
"Пешка",
"Пламена",
"Плодовитка",
"Полексина",
"Полин",
"Правда",
"Преса",
"Прина",
"Пролетина",
"Простисвета",
"Пупи",
"Първолетка",
"Рада",
"Радиа",
"Радимира",
"Радка",
"Радосвета",
"Радостка",
"Раинка",
"Райничка",
"Рамина",
"Ревка",
"Ренгия",
"Риана",
"Римма",
"Рия",
"Роза",
"Розана",
"Розета",
"Розка",
"Роксана",
"Ромолета",
"Роселина",
"Росислава",
"Ростислава",
"Ружка",
"Румислава",
"Русалия",
"Руска",
"Сабина",
"Савета",
"Салина",
"Санка",
"Сарая",
"Сахория",
"Свежа",
"Светла",
"Светломира",
"Свидна",
"Свободка",
"Севда",
"севделина",
"Севета",
"Семенарка",
"Сергелинка",
"Сибила",
"Сиена",
"Силви",
"Силвия-александра",
"Силяна",
"Симона",
"Синтия",
"Сисоя",
"Скакалка",
"Славея",
"Славка",
"Сладоледка",
"Смехотерапия",
"Смирна",
"Снежинка",
"Софийка",
"Спасена",
"Спасияна",
"Спирела",
"Стависара",
"Стаматка",
"Станиела",
"Станимирка",
"Сташа",
"Стелина",
"Стефани",
"Стеяна",
"Стоимена",
"Столетка",
"Стоянка",
"Сузи",
"Съвестина",
"Сърменка",
"Таисия",
"тамара",
"Таня",
"Ташимира",
"Теа",
"Телефонка",
"Темира",
"Теодора",
"Теса",
"Тилиана",
"Тиха",
"Тоанета",
"Толиана",
"Тона",
"Тоницвета",
"Тоска",
"Тошка",
"Трендафила",
"Трифонка",
"Троша",
"Труфана",
"Тръпка",
"Туфка",
"Улиана",
"Урима",
"Фабияна",
"Фатиме",
"Феня",
"Фикия",
"Филипини",
"Фимка",
"Флавия",
"Флорика",
"Фотинка",
"Фронка",
"Фуга",
"Хана",
"Харитония",
"Хенриета",
"Хинка",
"Холи",
"Хранислава",
"Хрисанка",
"Христа",
"Христела",
"Христилияна",
"Христоелена",
"Христя",
"Хубавелка",
"Цанета",
"Царевна",
"Цветана",
"Цветелина",
"Цветилена",
"Цветлина",
"Цветолилия",
"Цветяна",
"Цеца",
"Цола",
"Цоня",
"Чана",
"Чардафона",
"Чачия",
"Череша",
"Четвърта",
"Чона",
"Чубрина",
"Шана",
"Шена",
"Шехерезада",
"Шинка",
"Щедра",
"Щериана",
"Щефания",
"Щилянка",
"Щтилка",
"Ъгленка",
"Ъчка",
"Юлиена",
"Юлия",
"Юнона",
"Юрита",
"Юстианна",
"Ябленка",
"Явора",
"Ягода",
"Ялислава",
"Яна-Мартина",
"Янина",
"Яниславия",
"Янка",
"Ярка",
"Ясена",
"Ятана",
)
first_names_male = (
"Аблен",
"Август",
"Августиан",
"Августин",
"Авел",
"Авер",
"Аверно",
"Авксентий",
"Аво",
"Аврам",
"Аврели",
"Аврелий",
"Аврор",
"Агапи",
"Агапий",
"Агатопод",
"Агент",
"Аглай",
"Агнен",
"Агнеш",
"Агоп",
"Агъци",
"Адалберт",
"Адам",
"Адеан",
"Аделин",
"Адем",
"Адриан",
"Адриян",
"Аерозол",
"Азалия",
"Айдемир",
"Акашия",
"Аксакусти",
"Аксидан",
"Аксинтия",
"Алберт",
"Албияна",
"Алдин",
"Алевандър",
"Алег",
"Алек",
"Алекзандриян",
"Беримир",
"Берин",
"Берия",
"Беро",
"Берослав",
"Бетина",
"Бетино",
"Бечо",
"Билян",
"Бинко",
"Биньо",
"Бисенти",
"Бисер",
"Благо",
"Благовест",
"Благой",
"Благомир",
"Благосвет",
"Блаже",
"Бог",
"Богиня",
"Богой",
"Боголюб",
"Богомил",
"Богослав",
"Бодромир",
"Божан",
"Божидар",
"Божик",
"Божимир",
"Божин",
"Божинел",
"Божко",
"Божо",
"Божур",
"Боил",
"Боила",
"Бойко",
"Бойчо",
"Болен",
"Болеслав",
"Боне",
"Бонислав",
"Бонко",
"Боно",
"Веселин",
"Весислав",
"Весо",
"Веспасиян",
"Ветко",
"Вечко",
"Вигалот",
"Виго",
"Виделин",
"Виден",
"Видин",
"Видослав",
"Видю",
"Викенти",
"Виктор",
"Вилиан",
"Вилизар",
"Вилизара",
"Вилислав",
"Вилиян",
"Винету",
"Винко",
"Вино",
"Винсънт",
"Винченцо",
"Виолет",
"Виолин",
"Висарион",
"Виталий",
"Витко",
"Витлян",
"Витомир",
"Витош",
"Вихрен",
"Вихрони",
"Вихър",
"Вичо",
"Виша",
"Вишетин",
"Вишню",
"Влад",
"Владилен",
"Владимер",
"Владимир",
"Галентин",
"Галиен",
"Галимир",
"Галиян",
"Гани",
"Ганислав",
"Ганцомир",
"Ганчо",
"Ганьо",
"Гаро",
"Гатьо",
"Гацо",
"Гвардиана",
"Гелемир",
"Генади",
"ГенадиВалериев",
"Генадий",
"Генислав",
"Генко",
"Гено",
"Генчо",
"Гео",
"Геодим",
"Геомил",
"Георги",
"Герасим",
"Герган",
"Гергей",
"Гергелюб",
"Гергин",
"Гердан",
"Герман",
"Геро",
"Герой",
"Герчо",
"Гетислав",
"Гетко",
"Гето",
"Гецо",
"Гечо",
"Гешо",
"Гивеза",
"Гиздален",
"Гико",
"Гилдрой",
"Делчо",
"Делян",
"Деляна",
"Демир",
"Демян",
"Дениз",
"Деника",
"Денимир",
"Денис",
"Денислав",
"Дениян",
"Денчо",
"Дердидас",
"десилиан",
"Десимир",
"Десислав",
"Деслав",
"Деспин",
"Деспинка",
"Деспот",
"Детелин",
"Дечко",
"Дечо",
"Дечю",
"Дешо",
"Деян",
"Джанер",
"Джанко",
"Джихад",
"Джон-стефан",
"Диаманди",
"Диамантина",
"Диан",
"Диван(надядоДианидядоИван)",
"Дивизие",
"Дивизия",
"Дивил",
"Дидко",
"Диего",
"Дико",
"Дилян",
"Евстати",
"Евстатий",
"Евстахий",
"Евтим",
"Егор",
"Едвин",
"Едит",
"Едрю",
"Едуард",
"Еким",
"Ектар",
"Ектор",
"Елвис",
"Елеан",
"Електрон",
"Елемаг",
"Еленко",
"Елиан",
"Елиас",
"Елиезер",
"Елизабет",
"Елин",
"Елисей",
"Елисия",
"Елко",
"Ельо",
"Ема-Бела",
"Еманоил",
"Емануел",
"Емануил",
"Емил",
"Емилиан",
"Емилиян",
"Ендо",
"Енчо",
"Еньо",
"Еню",
"Ервин",
"Ередин",
"Еремия",
"Ерик",
"Ерина",
"Ерол",
"Ерсен",
"Есен",
"Етиен",
"Ефка",
"Заварин",
"Завен",
"Замфир",
"Занго",
"Занко",
"Запрян",
"Зарко",
"Зафер",
"Зафир",
"Захар",
"Захари",
"Захарин",
"Захо",
"Звездан",
"Звездин",
"Звездислав",
"Звездиян",
"Звездолет",
"Звездомир",
"Згура",
"Здравелин",
"Здравец",
"Здравко",
"Здравчо",
"Зенгин",
"Зика",
"Зинко",
"Зинови",
"Златан",
"Злати",
"Златил",
"Златимир",
"Златиян",
"Златко",
"Златогор",
"Златозар",
"Златомир",
"Златослав",
"Златоцвет",
"Златьо",
"Золтан",
"Илиомар",
"Илич",
"Илия",
"Илиян",
"Илко",
"Илчо",
"Имилиан",
"Ингемунд",
"Инко",
"Инчо",
"Иполит",
"Ириан",
"Ириней",
"иринеус",
"Ириян",
"Ирко",
"Ирма",
"Ирник",
"Исак",
"Исидор",
"Искрен",
"Искър",
"Исперих",
"Истан",
"Истатко",
"Истилян",
"Исус",
"Итко",
"Ихтиандър",
"Ицо",
"Ичо",
"Йено",
"Йеремия",
"Йоан",
"Йоан-Александър",
"Йоан-иво",
"Йован",
"Йовица",
"Йовко",
"Йово",
"Йовро",
"Йовцо",
"Йовчо",
"Йожи",
"Йоил",
"Йолиян",
"Йолко",
"Карен",
"Карим",
"Карин",
"Карло",
"Кармен",
"Каролин",
"Карчо",
"Катакомб",
"Каталинка",
"Катерин",
"Кевин",
"Кеворк",
"Кери",
"Кибер",
"Кимба",
"Кимбо",
"Кимон",
"Кимчо",
"Кин",
"Кинка",
"Кинта",
"Киприслав",
"Киприян",
"Кириен",
"Кирил",
"Кирчо",
"Киряк",
"Киряки",
"Киряко",
"Кис",
"Кит",
"Кито",
"Китодар",
"Китомир",
"Клим",
"Климент",
"Кнут",
"Козма",
"Койно",
"Койо",
"Койчо",
"Коко",
"Коле",
"Колчо",
"Кольо",
"Колю",
"Комнин",
"Лалко",
"Лало",
"Лальо",
"Ламби",
"Ламбо",
"Ламбю",
"Ланселот",
"Ласкал",
"Ласкар",
"Ластър",
"Латин",
"Латко",
"Латьо",
"Латю",
"ЛЕА-МАРИЯ",
"Леандър",
"Лев",
"Левент",
"Левчо",
"Леко",
"Ленин",
"Ленко",
"Леон",
"Леонид",
"Лесе",
"Лефтер",
"Лечо",
"Лилко",
"Лило",
"Лилчо",
"Лилян",
"Лимон",
"Липе",
"Лихия",
"Личо",
"Ловчо",
"Лозан",
"Лозана",
"Лозен",
"Лора-софия",
"Лоранс",
"Лука",
"Лукан",
"Луко",
"Лули",
"Лулчо",
"Лусио",
"Лусия",
"Маноел",
"Манол",
"Маноло",
"Мантас",
"Мануил",
"Мануш",
"МанчестърЮнайтед",
"Манчо",
"Маньо",
"Маню",
"Марангони",
"Маргарит",
"Марек",
"Мариан",
"Марий",
"Марин",
"Маринел",
"Маринчо",
"Марио",
"Мариоллита",
"Маритна",
"Мариян",
"Марк",
"Марк-антоний",
"Марко",
"Маркус",
"Мартен",
"Мартин",
"Мартиниян",
"Маруш",
"Марчело",
"Маслина",
"Матей",
"Матьо",
"Матю",
"Махно",
"Машо",
"Медиан",
"Мено",
"Мерилин",
"Месак",
"Метакса",
"Найо",
"Найчо",
"Наке",
"Нако",
"Нанко",
"Нано",
"Нансимир",
"Нанчо",
"Наню",
"Нарцислав",
"Наско",
"Настимир",
"Настрадин",
"Натанаил",
"Натко",
"Наум",
"Нафисат",
"Нафтали",
"Нацко",
"Нацо",
"Начиян",
"Начко",
"Начо",
"Невен",
"Невенко",
"Невилиян",
"Невян",
"Негослав",
"Неделин",
"Неделчо",
"Недислав",
"Недко",
"Недьо",
"Недю",
"Недялко",
"Нейко",
"Нейчо",
"Нелко",
"Нелчо",
"Нене",
"Ненко",
"Ненсислав",
"Ненчо",
"Неокли",
"Нерес",
"Обретим",
"Ованес",
"Огин",
"Огнемир",
"Огнен",
"Океан",
"Олег",
"Олек",
"Олимпи",
"Омар",
"Омуртаг",
"Оник",
"Онуфри",
"Ончо",
"Орлин",
"Ортодокси",
"Орфей",
"Орхидей",
"Орце",
"Оскар",
"Оханес",
"Павел",
"Павелин",
"Павко",
"Павлик",
"Павлин",
"Павлинчо",
"Павломир",
"Паисий",
"Пако",
"Палми",
"Палмиро",
"Панай",
"Панайот",
"Панде",
"Панделис",
"Пане",
"Панкртийян",
"Пано",
"Панталей",
"Пантелей",
"Панто",
"Пантю",
"Панчо",
"Паолина",
"Параскев",
"Параход",
"Парашкев",
"Парашкеван",
"Паруш",
"Паско",
"Паспанахил",
"Пато",
"Патрик",
"Патьо",
"Паулин",
"Паун",
"Пацо",
"Пачо",
"Пейко",
"Пейо",
"Пейодон",
"Пейтан",
"Пейчин",
"Пейчо",
"Пеко",
"Пелай",
"Пеле",
"Пенко",
"Пенчин",
"Пенчо",
"Пеньо",
"Пеню",
"Пео",
"Пепино",
"Пепислав",
"Пепо",
"Перикъл",
"Персиана",
"Персиян",
"Перчо",
"Петиконгрес",
"Петкан",
"Петко",
"Пето",
"Петраки",
"Петрана",
"Петринел",
"Петрозар",
"Петромил",
"Рангел",
"Рангел-любими",
"Рандю",
"Ранчо",
"Расате",
"Рафаил",
"Рачко",
"Рачо",
"Рашко",
"Рашо",
"Раю",
"Раян",
"Реан",
"Рем",
"Риналдо",
"Рис",
"Ристя",
"Ричард",
"Ричерд",
"Роберт",
"Роберто",
"Робин",
"Робърт",
"Рогелина",
"Рогена",
"Родан",
"Родион",
"Розалин",
"Розин",
"Розоцвет",
"Ройо",
"Роксан",
"Ромел",
"Ромелина",
"Ромен",
"Ромео",
"Ромил",
"Ромул",
"Росен",
"Росенка",
"Росимир",
"Роска",
"Роско",
"Ростиана",
"Ростимир",
"Ростислав",
"Светломир",
"Светлю",
"Светозар",
"Светослав",
"Себастиан",
"Себахтин",
"Севан",
"Севар",
"Севастин",
"Севдалин",
"Севдан",
"Севелин",
"Северин",
"Седеф",
"Седефчо",
"Селен",
"Селена",
"Сенко",
"Серафим",
"Сергей",
"Сеслав",
"Сиви",
"Сидер",
"Сидония",
"Сидор",
"Сиен",
"Силаги",
"Силан",
"Силвестър",
"Силвио",
"Силвиян",
"Силян",
"Симеон",
"Симо",
"Сирман",
"Сифоня",
"Скорбут",
"Слав",
"Славдо",
"Славе",
"Славейко",
"Славен",
"Слави",
"Славил",
"Теодоси",
"Теодосий",
"Теодослав",
"Теодостин",
"Теофан",
"Теохар",
"Тервел",
"Тигрони",
"Тило",
"Тильо",
"Тимо",
"Тимон",
"Тимотей",
"Тимчо",
"Тино",
"Тинчо",
"Тихо",
"Тихол",
"Тихомир",
"Тихон",
"Тишо",
"Тоде",
"Тодомирка",
"Тодораки",
"Тодорин",
"Тодорина",
"Токимир",
"Толек",
"Толю",
"Тома",
"Томас",
"Томен",
"Томинка",
"Томислав",
"Томо",
"Тоне",
"Тони",
"Тонимир",
"Тонислав",
"Тонко",
"Тончо",
"Топалко",
"Тополко",
"Тотко",
"Тотьо",
"Тотю",
"Тоцо",
"Филатей",
"Фили",
"Филидан",
"Филион",
"Филип",
"Филипас",
"Филипопол",
"Филко",
"Филомир",
"Филчо",
"Фильо",
"Финдо",
"Фиро",
"Фирчо",
"Фичо",
"Флори",
"Флориан",
"Флорин",
"Флоро",
"Фори",
"Фосил",
"Франк",
"Франц",
"Францислав",
"Фрацил",
"Фреди",
"Фродо",
"Фуго",
"Фуко",
"Фъстък",
"Фьодор",
"Хавтелин",
"Ханко",
"Хараламби",
"Харалампи",
"Харалан",
"Харбингър",
"Хари",
"Харизан",
"Харитон",
"Хасан",
"Хасатин",
"Хачо",
"Хвойне",
"Хебър",
"Хектор",
"Хераклит",
"Хернани",
"Хефестион",
"Химинай",
"Хинко",
"Хино",
"Хитко",
"Хороз",
"Храбрин",
"Храбър",
"Хранимир",
"Хрелко",
"Хрельо",
"Хрисим",
"Хрисимир",
"Хрисо",
"Христалин",
"Христивилин",
"Христиела",
"Христилиан",
"християн",
"Христо",
"Христо-никола",
"Христодор",
"Христозар",
"Христозорнициан",
"Христозорницомил",
"Христомил",
"Христомир",
"Христослав",
"Христофор",
"Хрисчо",
"Хрондел",
"Хрусан",
"Хубав",
"Хубавен",
"Хубан",
"Хубен",
"Цоню",
"Цоцо",
"Цочо",
"Цъки",
"Чавдар",
"Чанкете",
"Чанко",
"Чано",
"Чаньо",
"Чардафон",
"Чародей",
"Чауш",
"Чачо",
"Чвор",
"Чедомир",
"Ченко",
"Ченю",
"Чепо",
"Черноризец",
"Черньо",
"Чийо",
"Чико",
"Чило",
"Чонар",
"Чони",
"Чоно",
"Чоню",
"Чочо",
"Чочомир",
"Чубрик",
"Чуде",
"Чудо",
"Чудомир",
"Чудослав",
"Чук",
"Шабан",
"Шанко",
"Шаноу",
"Шаро",
"Шейна",
"Шеки",
"Шенко",
"Шенол",
"Шибил",
"Шидер",
"Шинко",
"Шино",
"Шипчан",
"Ширко",
"Шишман",
"Шкодри",
"Шмильо",
"Шмулю",
"Шпилко",
"Шушо",
"Щедрин",
"Щедю",
"Щеки",
"Щено",
"Щеню",
"Щерион",
"Щериян",
"Щерко",
"Щерьо",
"Щерю",
"Щилиян",
"Щилян",
"Щирян",
"Щоно",
"Щтърбан",
"Щтъркан",
"Щурк",
"Щърбан",
"Щъркан",
"Ъглен",
"Ълен",
"Ърнест",
"Ъруин",
"Ърчо",
"Ьобирдар",
"Юги",
"Юлиан",
"Юлий",
"Юлиян",
"Юрдан",
"Юри",
"Юрий",
"Юстин",
"Юстиниан",
"Яблен",
"Явор",
"Яго",
"Ягодин",
"Язо",
"Яким",
"Яко",
"Якоб",
"Яков",
"Якослав",
"Ян",
"Янадин",
"Янаки",
"Янакин",
"Яначко",
"Яне",
"Янег",
"Янедин",
"Янек",
"Яни",
"Яниел",
"Яник",
"Янимир",
"Янис",
"Янислав",
"Яничко",
"Янко",
"Янкул",
"Яно",
"Яномил",
"Янтар",
"Януш",
"Янцислав",
"Янче",
"Янчо",
"Ярно",
"Яромир",
"Ярце",
"Ярчо",
"Яръм",
"Ярю",
"Ясен",
"Ясер",
"Ястреб",
"Ятан",
"Яцо",
"Ячо",
"Яшар",
"Яшка",
"Яшо",
"Яшон",
)
last_names_male = (
"Симеонов",
"Данданов",
"Кърков",
"Братухчев",
"Цветков",
"Иванов",
"Яназов",
"Тодоров",
"Колчев",
"Келешев",
"Бърборков",
"Дришльов",
"Макаронски",
"Количков",
"Принов",
"Бодуров",
"Китов",
"Гьоков",
"Симеонов",
"Балахуров",
"Милачков",
"ЕвроповКирилов",
"Площаков",
"Мангъров",
"Хвърчилков",
"Дзезов",
"Ждраков",
"Тухчиев",
"Топков",
"Яков",
"Иликьов",
"Бурханларски",
"Вражалски",
"Тутурилов",
"Бранков",
"Зенгинов",
"Фенеров",
"Кучев",
"Възвъзов",
"Кьоров",
"Джогов",
"Пъков",
"Рангелов",
"Чутурков",
"Самсонов",
"Андонов",
"Бумов",
"Мочев",
"Дачев",
"Муев",
"Младенов",
"Тошев",
"Бедринов",
"Тумангелов",
"Канчин",
"Миленков",
"Патков",
"Пондьов",
"Самоходов",
"Четрафилски",
"Смърдански",
"Клатуров",
"Вакрилов",
"Прошков",
"Пулев",
"Парашкевов",
"Манавски",
"Чуков",
"Овнарски",
"Рошльов",
"Пройкова",
"Младенова",
"Кесьов",
"Римпопов",
"Златков",
"Колев",
"Пикянски",
"Николов",
"Цицков",
"Стойков",
"Каракашев",
"Докова",
"Мераков",
"Пеева",
"Педалов",
"Тъпчилещов",
"Въртунински",
"Кодуков",
"Татьозов",
"Токов",
"Юрганчев",
"Клатикрушев",
"Монтянов",
"Бобев",
"Топчийски",
"Луланков",
"Костов",
"Колипатков",
"Чукчуков",
"Катъров",
"Кобиларов",
"Лимонадов",
"Цоцов",
"Поаков",
"Недялков",
"Станишев",
"Йорданов",
"Щърбов",
"Занов",
)
last_names_female = (
"Кокошкова",
"Градинарова",
"Куртакова",
"Чанлиева",
"Тодорова",
"Пътечкова",
"Скринска",
"Сапунджиева",
"Вампирска",
"Васовa",
"Таралингова",
"Илиева",
"Кривошапкова",
"Чупетловска",
"Катърова",
"Бележкова",
"Мустакова",
"Пръндачка",
"Йоткова",
"Сланинкова",
"Мангъфова",
"Шкембова",
"Пенджакова",
"Пачаръзка",
"Куртажова",
"Плюнкова",
"Многознаева",
"Белоконска-Вражалска",
"Кучкуделова",
"Крушовска",
"Пищовколева",
"Сопаджиева",
"Точева-Клопова",
"Габровлиева",
"Първанова",
"Певецова",
"Яркова",
"Плюцова",
"Балканска",
)
prefixes_female = ("Г-жа", "Г-ца", "Др.")
prefixes_male = ("Г-н", "Др.")
formats_female = (
"{{first_name_female}} {{last_name_female}}",
"{{prefix_female}} {{first_name_female}} {{last_name_female}}",
)
formats_male = (
"{{first_name_male}} {{last_name_male}}",
"{{prefix_male}} {{first_name_male}} {{last_name_male}}",
)
formats = formats_male + formats_female
first_names = first_names_male + first_names_female
last_names = last_names_male + last_names_female
| Provider |
python | pyca__cryptography | src/cryptography/hazmat/primitives/ciphers/modes.py | {
"start": 3144,
"end": 6007
} | class ____(ModeWithInitializationVector, ModeWithAuthenticationTag):
name = "GCM"
_MAX_ENCRYPTED_BYTES = (2**39 - 256) // 8
_MAX_AAD_BYTES = (2**64) // 8
def __init__(
self,
initialization_vector: utils.Buffer,
tag: bytes | None = None,
min_tag_length: int = 16,
):
# OpenSSL 3.0.0 constrains GCM IVs to [64, 1024] bits inclusive
# This is a sane limit anyway so we'll enforce it here.
utils._check_byteslike("initialization_vector", initialization_vector)
if len(initialization_vector) < 8 or len(initialization_vector) > 128:
raise ValueError(
"initialization_vector must be between 8 and 128 bytes (64 "
"and 1024 bits)."
)
self._initialization_vector = initialization_vector
if tag is not None:
utils._check_bytes("tag", tag)
if min_tag_length < 4:
raise ValueError("min_tag_length must be >= 4")
if len(tag) < min_tag_length:
raise ValueError(
f"Authentication tag must be {min_tag_length} bytes or "
"longer."
)
self._tag = tag
self._min_tag_length = min_tag_length
@property
def tag(self) -> bytes | None:
return self._tag
@property
def initialization_vector(self) -> utils.Buffer:
return self._initialization_vector
def validate_for_algorithm(self, algorithm: CipherAlgorithm) -> None:
_check_aes_key_length(self, algorithm)
if not isinstance(algorithm, BlockCipherAlgorithm):
raise UnsupportedAlgorithm(
"GCM requires a block cipher algorithm",
_Reasons.UNSUPPORTED_CIPHER,
)
block_size_bytes = algorithm.block_size // 8
if self._tag is not None and len(self._tag) > block_size_bytes:
raise ValueError(
f"Authentication tag cannot be more than {block_size_bytes} "
"bytes."
)
utils.deprecated(
OFB,
__name__,
"OFB has been moved to "
"cryptography.hazmat.decrepit.ciphers.modes.OFB and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.modes in 49.0.0.",
utils.DeprecatedIn47,
name="OFB",
)
utils.deprecated(
CFB,
__name__,
"CFB has been moved to "
"cryptography.hazmat.decrepit.ciphers.modes.CFB and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.modes in 49.0.0.",
utils.DeprecatedIn47,
name="CFB",
)
utils.deprecated(
CFB8,
__name__,
"CFB8 has been moved to "
"cryptography.hazmat.decrepit.ciphers.modes.CFB8 and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.modes in 49.0.0.",
utils.DeprecatedIn47,
name="CFB8",
)
| GCM |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 22689,
"end": 23917
} | class ____:
"""a base for an adapter used for the DML RETURNING cases
Has a subset of the interface used by
:class:`.ORMAdapter` and is used for :class:`._QueryEntity`
instances to set up their columns as used in RETURNING for a
DML statement.
"""
__slots__ = ("mapper", "columns", "__weakref__")
def __init__(self, target_mapper, immediate_dml_mapper):
if (
immediate_dml_mapper is not None
and target_mapper.local_table
is not immediate_dml_mapper.local_table
):
# joined inh, or in theory other kinds of multi-table mappings
self.mapper = immediate_dml_mapper
else:
# single inh, normal mappings, etc.
self.mapper = target_mapper
self.columns = self.columns = util.WeakPopulateDict(
self.adapt_check_present # type: ignore
)
def __call__(self, col, as_filter):
for cc in sql_util._find_columns(col):
c2 = self.adapt_check_present(cc)
if c2 is not None:
return col
else:
return None
def adapt_check_present(self, col):
raise NotImplementedError()
| _DMLReturningColFilter |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/numpy/test_gen_data.py | {
"start": 2188,
"end": 44001
} | class ____:
pass
foos = st.tuples().map(lambda _: Foo())
def test_can_create_arrays_of_composite_types():
arr = minimal(nps.arrays(object, 100, elements=foos))
for x in arr:
assert isinstance(x, Foo)
@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
assert arr.shape == ()
assert arr.dtype == np.dtype(object)
assert arr.item() == x
def test_can_create_arrays_of_tuples():
arr = minimal(
nps.arrays(object, 10, elements=st.tuples(st.integers(), st.integers())),
lambda x: all(t0 != t1 for t0, t1 in x),
)
assert all(a in ((1, 0), (0, 1)) for a in arr)
@given(nps.arrays(object, (2, 2), elements=st.tuples(st.integers())))
def test_does_not_flatten_arrays_of_tuples(arr):
assert isinstance(arr[0][0], tuple)
@given(
nps.arrays(object, (2, 2), elements=st.lists(st.integers(), min_size=1, max_size=1))
)
def test_does_not_flatten_arrays_of_lists(arr):
assert isinstance(arr[0][0], list)
@given(nps.array_shapes())
def test_can_generate_array_shapes(shape):
assert isinstance(shape, tuple)
assert all(isinstance(i, int) for i in shape)
@settings(
deadline=None, max_examples=10, suppress_health_check=[HealthCheck.nested_given]
)
@given(st.integers(0, 10), st.integers(0, 9), st.integers(0), st.integers(0))
def test_minimise_array_shapes(min_dims, dim_range, min_side, side_range):
smallest = minimal(
nps.array_shapes(
min_dims=min_dims,
max_dims=min_dims + dim_range,
min_side=min_side,
max_side=min_side + side_range,
)
)
assert len(smallest) == min_dims
assert all(k == min_side for k in smallest)
@pytest.mark.parametrize(
"kwargs", [{"min_side": 100}, {"min_dims": 15}, {"min_dims": 32}]
)
def test_interesting_array_shapes_argument(kwargs):
check_can_generate_examples(nps.array_shapes(**kwargs))
@given(nps.scalar_dtypes())
def test_can_generate_scalar_dtypes(dtype):
assert isinstance(dtype, np.dtype)
@settings(max_examples=100)
@given(
nps.nested_dtypes(
subtype_strategy=st.one_of(
nps.scalar_dtypes(), nps.byte_string_dtypes(), nps.unicode_string_dtypes()
)
)
)
def test_can_generate_compound_dtypes(dtype):
assert isinstance(dtype, np.dtype)
@settings(max_examples=100)
@given(
nps.nested_dtypes(
subtype_strategy=st.one_of(
nps.scalar_dtypes(), nps.byte_string_dtypes(), nps.unicode_string_dtypes()
)
).flatmap(lambda dt: nps.arrays(dtype=dt, shape=1))
)
def test_can_generate_data_compound_dtypes(arr):
# This is meant to catch the class of errors which prompted PR #2085
assert isinstance(arr, np.ndarray)
@given(nps.nested_dtypes())
def test_np_dtype_is_idempotent(dtype):
assert dtype == np.dtype(dtype)
def test_minimise_scalar_dtypes():
assert minimal(nps.scalar_dtypes()) == np.dtype("bool")
def test_minimise_nested_types():
assert minimal(nps.nested_dtypes()) == np.dtype("bool")
def test_minimise_array_strategy():
smallest = minimal(
nps.arrays(
nps.nested_dtypes(max_itemsize=200),
nps.array_shapes(max_dims=3, max_side=3),
)
)
assert smallest.dtype == np.dtype("bool")
assert not smallest.any()
@given(nps.array_dtypes(allow_subarrays=False))
def test_can_turn_off_subarrays(dt):
for name in dt.names:
assert dt.fields[name][0].shape == ()
def test_array_dtypes_may_have_field_titles():
find_any(nps.array_dtypes(), lambda dt: len(dt.fields) > len(dt.names))
@pytest.mark.parametrize("byteorder", ["<", ">"])
@given(data=st.data())
def test_can_restrict_endianness(data, byteorder):
dtype = data.draw(nps.integer_dtypes(endianness=byteorder, sizes=(16, 32, 64)))
if byteorder == ("<" if sys.byteorder == "little" else ">"):
assert dtype.byteorder == "="
else:
assert dtype.byteorder == byteorder
@given(nps.integer_dtypes(sizes=8))
def test_can_specify_size_as_an_int(dt):
assert dt.itemsize == 1
@given(st.data())
def test_can_draw_arrays_from_scalars(data):
dt = data.draw(nps.scalar_dtypes())
result = data.draw(nps.arrays(dtype=dt, shape=()))
assert isinstance(result, np.ndarray)
assert result.dtype == dt
@given(st.data())
def test_can_cast_for_arrays(data):
# Note: this only passes with castable datatypes, certain dtype
# combinations will result in an error if numpy is not able to cast them.
dt_elements = np.dtype(data.draw(st.sampled_from(["bool", "<i2", ">i2"])))
dt_desired = np.dtype(
data.draw(st.sampled_from(["<i2", ">i2", "float32", "float64"]))
)
result = data.draw(
nps.arrays(
dtype=dt_desired, elements=nps.from_dtype(dt_elements), shape=(1, 2, 3)
)
)
assert isinstance(result, np.ndarray)
assert result.dtype == dt_desired
@given(nps.arrays(dtype="int8", shape=st.integers(0, 20), unique=True))
def test_array_values_are_unique(arr):
assert len(set(arr)) == len(arr)
def test_cannot_generate_unique_array_of_too_many_elements():
strat = nps.arrays(dtype=int, elements=st.integers(0, 5), shape=10, unique=True)
with pytest.raises(InvalidArgument):
check_can_generate_examples(strat)
@given(
nps.arrays(
elements=st.just(0.0),
dtype=float,
fill=st.just(np.nan),
shape=st.integers(0, 20),
unique=True,
)
)
def test_array_values_are_unique_high_collision(arr):
assert (arr == 0.0).sum() <= 1
@given(nps.arrays(dtype="int8", shape=(4,), elements=st.integers(0, 3), unique=True))
def test_generates_all_values_for_unique_array(arr):
# Ensures that the "reject already-seen element" branch is covered
assert len(set(arr)) == len(arr)
@given(nps.arrays(dtype="int8", shape=255, unique=True))
def test_efficiently_generates_all_unique_array(arr):
# Avoids the birthday paradox with UniqueSampledListStrategy
assert len(set(arr)) == len(arr)
@given(st.data(), st.integers(-100, 100), st.integers(1, 100))
def test_array_element_rewriting(data, start, size):
arr = nps.arrays(
dtype=np.dtype("int64"),
shape=size,
elements=st.integers(start, start + size - 1),
unique=True,
)
assert set(data.draw(arr)) == set(range(start, start + size))
def test_may_fill_with_nan_when_unique_is_set():
find_any(
nps.arrays(
dtype=float,
elements=st.floats(allow_nan=False),
shape=10,
unique=True,
fill=st.just(np.nan),
),
lambda x: np.isnan(x).any(),
)
@given(
nps.arrays(
dtype=float,
elements=st.floats(allow_nan=False),
shape=10,
unique=True,
fill=st.just(np.nan),
)
)
def test_is_still_unique_with_nan_fill(xs):
assert len(set(xs)) == len(xs)
@fails_with(InvalidArgument)
@given(
nps.arrays(
dtype=float,
elements=st.floats(allow_nan=False),
shape=10,
unique=True,
fill=st.just(0.0),
)
)
def test_may_not_fill_with_non_nan_when_unique_is_set(arr):
pass
@fails_with(InvalidArgument)
@given(nps.arrays(dtype="U", shape=10, unique=True, fill=st.just("")))
def test_may_not_fill_with_non_nan_when_unique_is_set_and_type_is_not_number(arr):
pass
np_version = tuple(int(x) for x in np.__version__.split(".")[:2])
@pytest.mark.parametrize("fill", [False, True])
# Overflowing elements deprecated upstream in Numpy 1.24 :-)
@fails_with(
InvalidArgument
if np_version < (1, 24)
else (DeprecationWarning if np_version < (2, 0) else OverflowError)
)
@given(st.data())
def test_overflowing_integers_are_deprecated(fill, data):
kw = {"elements": st.just(300)}
if fill:
kw = {"elements": st.nothing(), "fill": kw["elements"]}
arr = data.draw(nps.arrays(dtype="int8", shape=(1,), **kw))
assert arr[0] == (300 % 256)
@pytest.mark.parametrize("fill", [False, True])
@pytest.mark.parametrize(
"dtype,strat",
[
("float16", st.floats(min_value=65520, allow_infinity=False)),
("float32", st.floats(min_value=10**40, allow_infinity=False)),
(
"complex64",
st.complex_numbers(min_magnitude=10**300, allow_infinity=False),
),
("U1", st.text(min_size=2, max_size=2)),
("S1", st.binary(min_size=2, max_size=2)),
],
)
@fails_with(InvalidArgument)
@given(data=st.data())
def test_unrepresentable_elements_are_deprecated(fill, dtype, strat, data):
if fill:
kw = {"elements": st.nothing(), "fill": strat}
else:
kw = {"elements": strat}
try:
arr = data.draw(nps.arrays(dtype=dtype, shape=(1,), **kw))
except RuntimeWarning:
assert np_version >= (1, 24), "New overflow-on-cast detection"
raise InvalidArgument("so the test passes") from None
try:
# This is a float or complex number, and has overflowed to infinity,
# triggering our deprecation for overflow.
assert np.isinf(arr[0])
except TypeError:
# We tried to call isinf on a string. The string was generated at
# length two, then truncated by the dtype of size 1 - deprecation
# again. If the first character was \0 it is now the empty string.
assert len(arr[0]) <= 1
@given(nps.arrays(dtype="float16", shape=(1,)))
def test_inferred_floats_do_not_overflow(arr):
pass
@given(nps.arrays(dtype="float16", shape=10, elements={"min_value": 0, "max_value": 1}))
def test_inferred_floats_can_be_constrained_at_low_width(arr):
assert (arr >= 0).all()
assert (arr <= 1).all()
@given(
nps.arrays(
dtype="float16",
shape=10,
elements={
"min_value": 0,
"max_value": 1,
"exclude_min": True,
"exclude_max": True,
},
)
)
def test_inferred_floats_can_be_constrained_at_low_width_excluding_endpoints(arr):
assert (arr > 0).all()
assert (arr < 1).all()
@given(
nps.arrays(
dtype="float16",
shape=10,
unique=True,
elements=st.integers(1, 9),
fill=st.just(np.nan),
)
)
def test_unique_array_with_fill_can_use_all_elements(arr):
assume(len(set(arr)) == arr.size)
@given(nps.arrays(dtype="uint8", shape=25, unique=True, fill=st.nothing()))
def test_unique_array_without_fill(arr):
# This test covers the collision-related branches for fully dense unique arrays.
# Choosing 25 of 256 possible elements means we're almost certain to see collisions
# thanks to the 'birthday paradox', but finding unique elemennts is still easy.
assume(len(set(arr)) == arr.size)
@given(ndim=st.integers(0, 5), data=st.data())
def test_mapped_positive_axes_are_unique(ndim, data):
min_size = data.draw(st.integers(0, ndim), label="min_size")
max_size = data.draw(st.integers(min_size, ndim), label="max_size")
axes = data.draw(
nps.valid_tuple_axes(ndim, min_size=min_size, max_size=max_size), label="axes"
)
assert len(set(axes)) == len({i if 0 < i else ndim + i for i in axes})
@given(ndim=st.integers(0, 5), data=st.data())
def test_length_bounds_are_satisfied(ndim, data):
min_size = data.draw(st.integers(0, ndim), label="min_size")
max_size = data.draw(st.integers(min_size, ndim), label="max_size")
axes = data.draw(
nps.valid_tuple_axes(ndim, min_size=min_size, max_size=max_size), label="axes"
)
assert min_size <= len(axes) <= max_size
@given(shape=nps.array_shapes(), data=st.data())
def test_axes_are_valid_inputs_to_sum(shape, data):
x = np.zeros(shape, dtype="uint8")
axes = data.draw(nps.valid_tuple_axes(ndim=len(shape)), label="axes")
np.sum(x, axes)
@settings(
deadline=None, max_examples=10, suppress_health_check=[HealthCheck.nested_given]
)
@given(ndim=st.integers(0, 3), data=st.data())
def test_minimize_tuple_axes(ndim, data):
min_size = data.draw(st.integers(0, ndim), label="min_size")
max_size = data.draw(st.integers(min_size, ndim), label="max_size")
smallest = minimal(nps.valid_tuple_axes(ndim, min_size=min_size, max_size=max_size))
assert len(smallest) == min_size
assert all(k > -1 for k in smallest)
@settings(
deadline=None, max_examples=10, suppress_health_check=[HealthCheck.nested_given]
)
@given(ndim=st.integers(0, 3), data=st.data())
def test_minimize_negative_tuple_axes(ndim, data):
min_size = data.draw(st.integers(0, ndim), label="min_size")
max_size = data.draw(st.integers(min_size, ndim), label="max_size")
smallest = minimal(
nps.valid_tuple_axes(ndim, min_size=min_size, max_size=max_size),
lambda x: all(i < 0 for i in x),
)
assert len(smallest) == min_size
@given(nps.broadcastable_shapes((), min_side=0, max_side=0, min_dims=0, max_dims=0))
def test_broadcastable_empty_shape(shape):
assert shape == ()
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(shape=ANY_SHAPE, data=st.data())
def test_broadcastable_shape_bounds_are_satisfied(shape, data):
min_dims = data.draw(st.integers(0, 32), label="min_dims")
max_dims = data.draw(st.none() | st.integers(min_dims, 32), label="max_dims")
min_side = data.draw(st.integers(0, 3), label="min_side")
max_side = data.draw(st.none() | st.integers(min_side, 6), label="max_side")
try:
bshape = data.draw(
nps.broadcastable_shapes(
shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
),
label="bshape",
)
except InvalidArgument:
raise UnsatisfiedAssumption from None
if max_dims is None:
max_dims = max(len(shape), min_dims) + 2
if max_side is None:
max_side = max((*shape[::-1][:max_dims], min_side)) + 2
assert isinstance(bshape, tuple)
assert all(isinstance(s, int) for s in bshape)
assert min_dims <= len(bshape) <= max_dims
assert all(min_side <= s <= max_side for s in bshape)
@settings(deadline=None)
@given(num_shapes=st.integers(1, 4), base_shape=ANY_SHAPE, data=st.data())
def test_mutually_broadcastable_shape_bounds_are_satisfied(
num_shapes, base_shape, data
):
min_dims = data.draw(st.integers(0, 32), label="min_dims")
max_dims = data.draw(
st.one_of(st.none(), st.integers(min_dims, 32)), label="max_dims"
)
min_side = data.draw(st.integers(0, 3), label="min_side")
max_side = data.draw(
st.one_of(st.none(), st.integers(min_side, 6)), label="max_side"
)
try:
shapes, result = data.draw(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
),
label="shapes, result",
)
except InvalidArgument:
raise UnsatisfiedAssumption from None
if max_dims is None:
max_dims = max(len(base_shape), min_dims) + 2
if max_side is None:
max_side = max((*base_shape[::-1][:max_dims], min_side)) + 2
assert isinstance(shapes, tuple)
assert isinstance(result, tuple)
assert all(isinstance(s, int) for s in result)
for bshape in shapes:
assert isinstance(bshape, tuple)
assert all(isinstance(s, int) for s in bshape)
assert min_dims <= len(bshape) <= max_dims
assert all(min_side <= s <= max_side for s in bshape)
def _draw_valid_bounds(data, shape, max_dims, *, permit_none=True):
if max_dims == 0 or not shape:
return 0, None
smallest_side = min(shape[::-1][:max_dims])
min_strat = (
st.sampled_from([1, smallest_side])
if smallest_side > 1
else st.just(smallest_side)
)
min_side = data.draw(min_strat, label="min_side")
largest_side = max(max(shape[::-1][:max_dims]), min_side)
if permit_none:
max_strat = st.one_of(st.none(), st.integers(largest_side, largest_side + 2))
else:
max_strat = st.integers(largest_side, largest_side + 2)
max_side = data.draw(max_strat, label="max_side")
return min_side, max_side
def _broadcast_two_shapes(shape_a: nps.Shape, shape_b: nps.Shape) -> nps.Shape:
result = []
for a, b in zip_longest(reversed(shape_a), reversed(shape_b), fillvalue=1):
if a != b and (a != 1) and (b != 1):
raise ValueError(
f"shapes {shape_a!r} and {shape_b!r} are not broadcast-compatible"
)
result.append(a if a != 1 else b)
return tuple(reversed(result))
def _broadcast_shapes(*shapes):
"""Returns the shape resulting from broadcasting the
input shapes together.
Raises ValueError if the shapes are not broadcast-compatible"""
assert shapes, "Must pass >=1 shapes to broadcast"
return reduce(_broadcast_two_shapes, shapes, ())
@settings(deadline=None, max_examples=500)
@given(
shapes=st.lists(
nps.array_shapes(min_dims=0, min_side=0, max_dims=4, max_side=4), min_size=1
)
)
def test_broadcastable_shape_util(shapes):
"""Ensures that `_broadcast_shapes` raises when fed incompatible shapes,
and ensures that it produces the true broadcasted shape"""
if len(shapes) == 1:
assert _broadcast_shapes(*shapes) == shapes[0]
return
arrs = [np.zeros(s, dtype=np.uint8) for s in shapes]
try:
broadcast_out = np.broadcast_arrays(*arrs)
except ValueError:
with pytest.raises(ValueError):
_broadcast_shapes(*shapes)
return
broadcasted_shape = _broadcast_shapes(*shapes)
assert broadcast_out[0].shape == broadcasted_shape
@settings(deadline=None, max_examples=200)
@given(shape=ANY_NONZERO_SHAPE, data=st.data())
def test_broadcastable_shape_has_good_default_values(shape, data):
# This test ensures that default parameters can always produce broadcast-compatible shapes
broadcastable_shape = data.draw(
nps.broadcastable_shapes(shape), label="broadcastable_shapes"
)
# error if drawn shape for b is not broadcast-compatible
_broadcast_shapes(shape, broadcastable_shape)
@settings(deadline=None, max_examples=200)
@given(base_shape=ANY_SHAPE, num_shapes=st.integers(1, 10), data=st.data())
def test_mutually_broadcastableshapes_has_good_default_values(
num_shapes, base_shape, data
):
# This test ensures that default parameters can always produce broadcast-compatible shapes
shapes, result = data.draw(
nps.mutually_broadcastable_shapes(num_shapes=num_shapes, base_shape=base_shape),
label="shapes, result",
)
assert len(shapes) == num_shapes
# raises if shapes are not mutually-compatible
assert result == _broadcast_shapes(base_shape, *shapes)
@settings(deadline=None)
@given(min_dims=st.integers(0, 32), shape=ANY_SHAPE, data=st.data())
def test_broadcastable_shape_can_broadcast(min_dims, shape, data):
max_dims = data.draw(st.none() | st.integers(min_dims, 32), label="max_dims")
min_side, max_side = _draw_valid_bounds(data, shape, max_dims)
broadcastable_shape = data.draw(
nps.broadcastable_shapes(
shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
),
label="broadcastable_shapes",
)
# error if drawn shape for b is not broadcast-compatible
_broadcast_shapes(shape, broadcastable_shape)
@settings(deadline=None)
@given(
num_shapes=st.integers(1, 10),
min_dims=st.integers(0, 32),
base_shape=ANY_SHAPE,
data=st.data(),
)
def test_mutually_broadcastable_shape_can_broadcast(
num_shapes, min_dims, base_shape, data
):
max_dims = data.draw(st.none() | st.integers(min_dims, 32), label="max_dims")
min_side, max_side = _draw_valid_bounds(data, base_shape, max_dims)
shapes, result = data.draw(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
),
label="shapes, result",
)
# error if drawn shapes are not mutually broadcast-compatible
assert result == _broadcast_shapes(base_shape, *shapes)
@settings(
deadline=None, max_examples=50, suppress_health_check=[HealthCheck.nested_given]
)
@given(
num_shapes=st.integers(1, 3),
min_dims=st.integers(0, 5),
base_shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=5),
data=st.data(),
)
def test_minimize_mutually_broadcastable_shape(num_shapes, min_dims, base_shape, data):
# Ensure aligned dimensions of broadcastable shape minimizes to `(1,) * min_dims`
max_dims = data.draw(st.none() | st.integers(min_dims, 5), label="max_dims")
min_side, max_side = _draw_valid_bounds(
data, base_shape, max_dims, permit_none=False
)
if num_shapes > 1:
# shrinking gets a little bit hairy when we have empty axes
# and multiple num_shapes
assume(min_side > 0)
smallest_shapes, result = minimal(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
)
)
note(f"smallest_shapes: {smallest_shapes}")
note(f"result: {result}")
assert len(smallest_shapes) == num_shapes
assert result == _broadcast_shapes(base_shape, *smallest_shapes)
for smallest in smallest_shapes:
n_leading = max(len(smallest) - len(base_shape), 0)
n_aligned = max(len(smallest) - n_leading, 0)
note(f"n_leading: {n_leading}")
note(f"n_aligned: {n_aligned} {base_shape[-n_aligned:]}")
expected = [min_side] * n_leading + [
(min(1, i) if i != 1 else min_side) if min_side <= 1 <= max_side else i
for i in (base_shape[-n_aligned:] if n_aligned else ())
]
assert tuple(expected) == smallest
@settings(deadline=None)
@given(max_dims=st.integers(4, 6), data=st.data())
def test_broadcastable_shape_adjusts_max_dim_with_explicit_bounds(max_dims, data):
# Ensures that `broadcastable_shapes` limits itself to satisfiable dimensions
# Broadcastable values can only be drawn for dims 0-3 for these shapes
shape = data.draw(st.sampled_from([(5, 3, 2, 1), (0, 3, 2, 1)]), label="shape")
broadcastable_shape = data.draw(
nps.broadcastable_shapes(
shape, min_side=2, max_side=3, min_dims=3, max_dims=max_dims
),
label="broadcastable_shapes",
)
assert len(broadcastable_shape) == 3
# error if drawn shape for b is not broadcast-compatible
_broadcast_shapes(shape, broadcastable_shape)
@settings(deadline=None)
@given(
max_side=st.sampled_from([3, None]),
min_dims=st.integers(0, 4),
num_shapes=st.integers(1, 3),
data=st.data(),
)
def test_mutually_broadcastable_shape_adjusts_max_dim_with_default_bounds(
max_side, min_dims, num_shapes, data
):
# Ensures that `mutually_broadcastable_shapes` limits itself to satisfiable dimensions
# when a default `max_dims` is derived.
base_shape = data.draw(
st.sampled_from([(5, 3, 2, 1), (0, 3, 2, 1)]), label="base_shape"
)
try:
shapes, result = data.draw(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=2,
max_side=max_side,
min_dims=min_dims,
),
label="shapes, result",
)
except InvalidArgument:
# There is no satisfiable `max_dims` for us to tune
assert min_dims == 4
assert max_side == 3 or base_shape[0] == 0
return
if max_side == 3 or base_shape[0] == 0:
assert all(len(s) <= 3 for s in shapes)
elif min_dims == 4:
assert all(4 <= len(s) for s in shapes)
# error if drawn shape for b is not broadcast-compatible
assert len(shapes) == num_shapes
assert result == _broadcast_shapes(base_shape, *shapes)
@settings(
deadline=None, max_examples=10, suppress_health_check=[HealthCheck.nested_given]
)
@given(min_dims=st.integers(0, 32), min_side=st.integers(2, 3), data=st.data())
def test_broadcastable_shape_shrinking_with_singleton_out_of_bounds(
min_dims, min_side, data
):
max_dims = data.draw(st.none() | st.integers(min_dims, 32), label="max_dims")
max_side = data.draw(st.none() | st.integers(min_side, 6), label="max_side")
shape = data.draw(st.integers(1, 4).map(lambda n: n * (1,)), label="shape")
smallest = minimal(
nps.broadcastable_shapes(
shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
)
)
assert smallest == (min_side,) * min_dims
@settings(
deadline=None, max_examples=50, suppress_health_check=[HealthCheck.nested_given]
)
@given(
num_shapes=st.integers(1, 4),
min_dims=st.integers(0, 4),
min_side=st.integers(2, 3),
data=st.data(),
)
def test_mutually_broadcastable_shapes_shrinking_with_singleton_out_of_bounds(
num_shapes, min_dims, min_side, data
):
"""Ensures that shapes minimize to `(min_side,) * min_dims` when singleton dimensions
are disallowed."""
max_dims = data.draw(st.none() | st.integers(min_dims, 4), label="max_dims")
max_side = data.draw(
st.one_of(st.none(), st.integers(min_side, 6)), label="max_side"
)
ndims = data.draw(st.integers(1, 4), label="ndim")
base_shape = (1,) * ndims
smallest_shapes, result = minimal(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=min_side,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
)
)
note(f"(smallest_shapes, result): {(smallest_shapes, result)}")
assert len(smallest_shapes) == num_shapes
assert result == _broadcast_shapes(base_shape, *smallest_shapes)
for smallest in smallest_shapes:
assert smallest == (min_side,) * min_dims
@settings(suppress_health_check=[HealthCheck.too_slow])
@given(
num_shapes=st.integers(1, 4),
min_dims=st.integers(1, 32),
max_side=st.integers(1, 6),
data=st.data(),
)
def test_mutually_broadcastable_shapes_only_singleton_is_valid(
num_shapes, min_dims, max_side, data
):
"""Ensures that, when all aligned base-shape dim sizes are larger
than ``max_side``, only singletons can be drawn"""
max_dims = data.draw(st.integers(min_dims, 32), label="max_dims")
base_shape = data.draw(
nps.array_shapes(min_side=max_side + 1, min_dims=1), label="base_shape"
)
input_shapes, result = data.draw(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=1,
max_side=max_side,
min_dims=min_dims,
max_dims=max_dims,
),
label="input_shapes, result",
)
assert len(input_shapes) == num_shapes
assert result == _broadcast_shapes(base_shape, *input_shapes)
for shape in input_shapes:
assert all(i == 1 for i in shape[-len(base_shape) :])
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(
shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=5),
max_dims=st.integers(0, 6),
data=st.data(),
)
def test_broadcastable_shape_can_generate_arbitrary_ndims(shape, max_dims, data):
# ensures that generates shapes can possess any length in [min_dims, max_dims]
desired_ndim = data.draw(st.integers(0, max_dims), label="desired_ndim")
min_dims = data.draw(
st.one_of(st.none(), st.integers(0, desired_ndim)), label="min_dims"
)
# check default arg behavior too
kwargs = {"min_dims": min_dims} if min_dims is not None else {}
find_any(
nps.broadcastable_shapes(shape, min_side=0, max_dims=max_dims, **kwargs),
lambda x: len(x) == desired_ndim,
settings(max_examples=10**6),
)
@settings(deadline=None)
@given(
num_shapes=st.integers(1, 3),
base_shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=5),
max_dims=st.integers(0, 4),
data=st.data(),
)
def test_mutually_broadcastable_shapes_can_generate_arbitrary_ndims(
num_shapes, base_shape, max_dims, data
):
# ensures that each generated shape can possess any length in [min_dims, max_dims]
desired_ndims = data.draw(
st.lists(st.integers(0, max_dims), min_size=num_shapes, max_size=num_shapes),
label="desired_ndims",
)
min_dims = data.draw(
st.one_of(st.none(), st.integers(0, min(desired_ndims))), label="min_dims"
)
# check default arg behavior too
kwargs = {"min_dims": min_dims} if min_dims is not None else {}
find_any(
nps.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_side=0,
max_dims=max_dims,
**kwargs,
),
lambda x: {len(s) for s in x.input_shapes} == set(desired_ndims),
settings(max_examples=10**6),
)
@settings(deadline=None, suppress_health_check=list(HealthCheck))
@given(
base_shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=2),
max_dims=st.integers(1, 4),
)
def test_mutually_broadcastable_shapes_can_generate_interesting_singletons(
base_shape, max_dims
):
find_any(
nps.mutually_broadcastable_shapes(
num_shapes=2,
base_shape=base_shape,
min_side=0,
max_dims=max_dims,
),
lambda x: any(a != b for a, b in zip(*(s[::-1] for s in x.input_shapes), strict=False)), # type: ignore
)
@pytest.mark.parametrize("base_shape", [(), (0,), (1,), (2,), (1, 2), (2, 1), (2, 2)])
def test_mutually_broadcastable_shapes_can_generate_mirrored_singletons(base_shape):
def f(shapes: nps.BroadcastableShapes):
x, y = shapes.input_shapes
return x.count(1) == 1 and y.count(1) == 1 and x[::-1] == y
find_any(
nps.mutually_broadcastable_shapes(
num_shapes=2,
base_shape=base_shape,
min_side=0,
max_side=3,
min_dims=2,
max_dims=2,
),
f,
)
@settings(deadline=None)
@given(
shape=nps.array_shapes(min_dims=1, min_side=1),
dtype=st.one_of(nps.unsigned_integer_dtypes(), nps.integer_dtypes()),
data=st.data(),
)
def test_advanced_integer_index_is_valid_with_default_result_shape(shape, dtype, data):
index = data.draw(nps.integer_array_indices(shape, dtype=dtype))
x = np.zeros(shape)
out = x[index] # raises if the index is invalid
assert not np.shares_memory(x, out) # advanced indexing should not return a view
assert all(dtype == x.dtype for x in index)
@settings(deadline=None)
@given(
shape=nps.array_shapes(min_dims=1, min_side=1),
min_dims=st.integers(0, 3),
min_side=st.integers(0, 3),
dtype=st.one_of(nps.unsigned_integer_dtypes(), nps.integer_dtypes()),
data=st.data(),
)
def test_advanced_integer_index_is_valid_and_satisfies_bounds(
shape, min_dims, min_side, dtype, data
):
max_side = data.draw(st.integers(min_side, min_side + 2), label="max_side")
max_dims = data.draw(st.integers(min_dims, min_dims + 2), label="max_dims")
index = data.draw(
nps.integer_array_indices(
shape,
result_shape=nps.array_shapes(
min_dims=min_dims,
max_dims=max_dims,
min_side=min_side,
max_side=max_side,
),
dtype=dtype,
)
)
x = np.zeros(shape)
out = x[index] # raises if the index is invalid
assert all(min_side <= s <= max_side for s in out.shape)
assert min_dims <= out.ndim <= max_dims
assert not np.shares_memory(x, out) # advanced indexing should not return a view
assert all(dtype == x.dtype for x in index)
@settings(deadline=None, suppress_health_check=[HealthCheck.nested_given])
@given(
shape=nps.array_shapes(min_dims=1, min_side=1),
min_dims=st.integers(0, 3),
min_side=st.integers(0, 3),
dtype=st.sampled_from(["uint8", "int8"]),
data=st.data(),
)
def test_advanced_integer_index_minimizes_as_documented(
shape, min_dims, min_side, dtype, data
):
max_side = data.draw(st.integers(min_side, min_side + 2), label="max_side")
max_dims = data.draw(st.integers(min_dims, min_dims + 2), label="max_dims")
result_shape = nps.array_shapes(
min_dims=min_dims, max_dims=max_dims, min_side=min_side, max_side=max_side
)
smallest = minimal(
nps.integer_array_indices(shape, result_shape=result_shape, dtype=dtype)
)
desired = len(shape) * (np.zeros(min_dims * [min_side]),)
assert len(smallest) == len(desired)
for s, d in zip(smallest, desired, strict=True):
np.testing.assert_array_equal(s, d)
@settings(
deadline=None, max_examples=10, suppress_health_check=[HealthCheck.nested_given]
)
@given(
shape=nps.array_shapes(min_dims=1, max_dims=2, min_side=1, max_side=3),
data=st.data(),
)
def test_advanced_integer_index_can_generate_any_pattern(shape, data):
# ensures that generated index-arrays can be used to yield any pattern of elements from an array
x = np.arange(np.prod(shape)).reshape(shape)
target_array = data.draw(
nps.arrays(
shape=nps.array_shapes(min_dims=1, max_dims=2, min_side=1, max_side=2),
elements=st.sampled_from(x.flatten()),
dtype=x.dtype,
),
label="target",
)
def index_selects_values_in_order(index):
selected = x[index]
target(len(set(selected.flatten())), label="unique indices")
target(float(np.sum(target_array == selected)), label="elements correct")
return np.all(target_array == selected)
minimal(
nps.integer_array_indices(shape, result_shape=st.just(target_array.shape)),
index_selects_values_in_order,
settings(max_examples=10**6, phases=[Phase.generate, Phase.target]),
)
@pytest.mark.parametrize(
"condition",
[
lambda ix: isinstance(ix, tuple) and Ellipsis in ix,
lambda ix: isinstance(ix, tuple) and Ellipsis not in ix,
lambda ix: isinstance(ix, tuple) and np.newaxis in ix,
lambda ix: isinstance(ix, tuple) and np.newaxis not in ix,
lambda ix: ix is Ellipsis,
lambda ix: ix == np.newaxis,
],
)
def test_basic_indices_options(condition):
indexers = nps.array_shapes(min_dims=0, max_dims=32).flatmap(
lambda shape: nps.basic_indices(shape, allow_newaxis=True)
)
find_any(indexers, condition)
def test_basic_indices_can_generate_empty_tuple():
find_any(nps.basic_indices(shape=(0, 0), allow_ellipsis=True), lambda ix: ix == ())
def test_basic_indices_can_generate_non_tuples():
find_any(
nps.basic_indices(shape=(0, 0), allow_ellipsis=True),
lambda ix: not isinstance(ix, tuple),
)
def test_basic_indices_can_generate_long_ellipsis():
# Runs of slice(None) - such as [0,:,:,:,0] - can be replaced by e.g. [0,...,0]
find_any(
nps.basic_indices(shape=(1, 0, 0, 0, 1), allow_ellipsis=True),
lambda ix: len(ix) == 3 and ix[1] == Ellipsis,
)
@given(
nps.basic_indices(shape=(0, 0, 0, 0, 0)).filter(
lambda idx: isinstance(idx, tuple) and Ellipsis in idx
)
)
def test_basic_indices_replaces_whole_axis_slices_with_ellipsis(idx):
# `slice(None)` (aka `:`) is the only valid index for an axis of size
# zero, so if all dimensions are 0 then a `...` will replace all the
# slices because we generate `...` for entire contiguous runs of `:`
assert slice(None) not in idx
def test_basic_indices_can_generate_indices_not_covering_all_dims():
# These "flat indices" are skippable in the underlying BasicIndexStrategy,
# so we ensure we're definitely generating them for nps.basic_indices().
find_any(
nps.basic_indices(shape=(3, 3, 3)),
lambda ix: (
(not isinstance(ix, tuple) and ix != Ellipsis)
or (isinstance(ix, tuple) and Ellipsis not in ix and len(ix) < 3)
),
settings=settings(max_examples=5_000),
)
@given(
shape=nps.array_shapes(min_dims=0, max_side=4)
| nps.array_shapes(min_dims=0, min_side=0, max_side=10),
allow_newaxis=st.booleans(),
allow_ellipsis=st.booleans(),
data=st.data(),
)
def test_basic_indices_generate_valid_indexers(
shape, allow_newaxis, allow_ellipsis, data
):
min_dims = data.draw(
st.integers(0, 5 if allow_newaxis else len(shape)), label="min_dims"
)
max_dims = data.draw(
st.none() | st.integers(min_dims, 32 if allow_newaxis else len(shape)),
label="max_dims",
)
indexer = data.draw(
nps.basic_indices(
shape,
min_dims=min_dims,
max_dims=max_dims,
allow_ellipsis=allow_ellipsis,
allow_newaxis=allow_newaxis,
),
label="indexer",
)
# Check that disallowed things are indeed absent
if not allow_newaxis:
if isinstance(indexer, tuple):
assert 0 <= len(indexer) <= len(shape) + int(allow_ellipsis)
else:
assert 1 <= len(shape) + int(allow_ellipsis)
assert np.newaxis not in shape
if not allow_ellipsis:
assert Ellipsis not in shape
if 0 in shape:
# If there's a zero in the shape, the array will have no elements.
array = np.zeros(shape)
assert array.size == 0
elif np.prod(shape) <= 10**5:
# If it's small enough to instantiate, do so with distinct elements.
array = np.arange(np.prod(shape)).reshape(shape)
else:
# We can't cheat on this one, so just try another.
assume(False)
view = array[indexer]
if not np.isscalar(view):
assert min_dims <= view.ndim <= (32 if max_dims is None else max_dims)
if view.size:
assert np.shares_memory(view, array)
# addresses https://github.com/HypothesisWorks/hypothesis/issues/2582
@given(
    nps.arrays(
        shape=nps.array_shapes(min_dims=0, min_side=0), dtype=nps.floating_dtypes()
    )
)
def test_array_owns_memory(x: np.ndarray):
    """Generated arrays own their buffer; views taken from them do not."""
    assert x.base is None
    full_view = x[...]
    assert full_view.base is x
@given(st.data())
def test_no_recursion_in_multi_line_reprs_issue_3560(data):
    # Regression test for https://github.com/HypothesisWorks/hypothesis/issues/3560
    # Drawing from a .map()-ped arrays() strategy must not recurse while
    # building reprs. The second draw deliberately spells the same strategy
    # across multiple source lines — the issue was specific to multi-line
    # reprs, so preserve this formatting.
    data.draw(nps.arrays(shape=(2,), dtype=float).map(lambda x: x))
    data.draw(
        nps.arrays(
            shape=(2,),
            dtype=float,
        ).map(lambda x: x)
    )
def test_infers_elements_and_fill():
    # Regression test for https://github.com/HypothesisWorks/hypothesis/issues/3900
    # We only infer a fill strategy if the elements_strategy has reusable values,
    # and the interaction of two performance fixes broke this. Oops...
    uint32_max = 2**32 - 1
    expected_repr = f"integers(0, {uint32_max})"
    strat = unwrap_strategies(nps.arrays(dtype=np.uint32, shape=1))
    assert isinstance(strat, nps.ArrayStrategy)
    assert repr(strat.element_strategy) == expected_repr
    assert repr(strat.fill) == expected_repr

    # But we _don't_ infer a fill if the elements strategy is non-reusable
    elems = st.builds(lambda x: x * 2, st.integers(1, 10)).map(np.uint32)
    assert not elems.has_reusable_values
    strat = unwrap_strategies(nps.arrays(dtype=np.uint32, shape=1, elements=elems))
    assert strat.fill.is_empty
@given(nps.arrays(np.dtype("O"), shape=nps.array_shapes()))
def test_object_arrays_are_of_type_object(obj_array):
    """Arrays requested with an object dtype really carry dtype('O')."""
    expected_dtype = np.dtype(object)
    assert obj_array.dtype == expected_dtype
def test_class_instances_not_allowed_in_scalar_array():
    class Sample:
        pass

    strategy = nps.arrays(
        nps.scalar_dtypes(),
        shape=nps.array_shapes(),
        elements=st.just(Sample()),
    )
    # Generation may raise ValueError rather than InvalidArgument: e.g. when
    # the scalar dtype is a datetime, numpy itself rejects the conversion.
    with pytest.raises((InvalidArgument, ValueError)):
        check_can_generate_examples(strategy)
def test_object_arrays_with_mixed_elements_has_object_dtype():
    class Marker:
        pass

    strategy = nps.arrays(
        np.dtype("O"),
        shape=nps.array_shapes(),
        elements=st.just(Marker()) | st.integers(),
    )
    # Every generated array keeps the object dtype...
    assert_all_examples(strategy, lambda arr: arr.dtype == np.dtype("O"))
    # ...and at least one example actually mixes element types.
    find_any(strategy, lambda arr: len({type(x) for x in arr.ravel()}) > 1)
@given(st.data())
def test_object_array_can_hold_arbitrary_class_instances(data):
    # Draw some instance of some drawable type, then fill an object array
    # with that exact instance and confirm it is stored by identity.
    instance = data.draw(st.from_type(type).flatmap(st.from_type))
    strategy = nps.arrays(np.dtype("O"), nps.array_shapes(), elements=st.just(instance))
    arr = data.draw(strategy)
    for value in arr.ravel():
        assert value is instance
def test_object_array_can_hold_incomparable_elements():
    """Object arrays work even when elements refuse equality comparison."""

    class Incomparable:
        def __eq__(self, other):
            raise TypeError

    strategy = nps.arrays(
        np.dtype("O"),
        nps.array_shapes(),
        elements=st.just(Incomparable()),
    )
    check_can_generate_examples(strategy)
def test_can_generate_nested_object_arrays():
    # Object arrays may themselves contain integer arrays as elements.
    inner_arrays = nps.arrays(np.dtype("int"), nps.array_shapes())
    check_can_generate_examples(
        nps.arrays(np.dtype("O"), nps.array_shapes(), elements=inner_arrays)
    )
| Foo |
python | pytorch__pytorch | test/inductor/test_torchinductor_strided_blocks.py | {
"start": 6912,
"end": 50739
} | class ____:
@parametrize(
"expected_num_block_pointers,raises",
[
(3, False), # This should pass
(9, True), # This should fail
],
)
def test_expected_num_block_pointers(
self, expected_num_block_pointers: int, raises: bool
):
"""
Checks that the test harness verifies the number of block pointers correctly.
"""
def foo(x, y):
return x + y
device = torch.device(self.device)
inputs = [torch.randn(8).to(device) for arg_idx in range(2)]
# Expect failure for bad inputs
with self.assertRaises(AssertionError) if raises else contextlib.nullcontext():
# Expect 3 block pointers: 2 inputs 1 output
self._run_and_compare(
foo,
*inputs,
expected_num_block_pointers=expected_num_block_pointers,
)
    @parametrize("prefer_nd_tiling", [False, True])
    @parametrize(
        "full_size,view_size,stride,offset,require_block_ptr",
        [
            ((64, 32, 32), (32, 16, 8), None, None, True),
            ((16, 8, 8, 8), (8, 8, 4, 2), None, None, True),
            ((8, 8, 8, 8), (4, 4, 4, 4), None, None, True),
            ((8, 8), (4, 4), None, 10, True),  # Storage offset
            ((8, 8), (4, 4), (16, 2), None, True),  # Non-default strides
            ((8, 8), (4, 4), (1, 8), None, True),  # Transposed strides
            (
                (5, 9),
                (5, 8),
                None,
                None,
                True,
            ),  # Non-power-of-2 leading dim: block ptr
            (
                (15, 9),
                (15, 3),
                None,
                None,
                False,
            ),  # Non-power-of-2 inner dims: non-block ptr
            ((1, 1, 1), (1, 1, 1), None, None, False),  # Scalar: non-block ptr
            subtest(
                arg_values=(
                    (2, 4 * max_block),
                    (2, 3 * max_block),
                    None,
                    None,
                    True,
                ),  # Inner dim multiple of max_block
                decorators=[
                    test_torchinductor.skip_if_triton_cpu("Triton CPU: slow test")
                ],
            ),
        ],
    )
    def test_pointwise(
        self,
        full_size: tuple[int, ...],
        view_size: tuple[int, ...],
        stride: Optional[tuple[int, ...]],
        offset: Optional[int],
        require_block_ptr: bool,
        prefer_nd_tiling: bool,
    ):
        """
        Test generating strided ND block pointers for a pointwise kernel.
        If require_block_ptr is True, the generated code must contain block
        pointers. However, ND block pointers are not supported for all shapes. So
        we also test some odd shapes with require_block_ptr set to False, to ensure that
        block pointer analysis does not break these cases.
        """

        def get_input() -> torch.Tensor:
            # Build a strided (and possibly offset) view over a fresh random
            # tensor, so the kernel sees non-contiguous inputs.
            device = torch.device(self.device)
            full = torch.randn(full_size).to(device)

            # Use the original tensor's stride by default
            view_stride = full.stride() if stride is None else stride
            return torch.as_strided(full, view_size, view_stride, storage_offset=offset)

        args = [get_input() for arg_idx in range(2)]

        # Expect 3 block pointers: 2 inputs 1 output
        self._run_and_compare(
            torch.add,
            *args,
            expected_num_block_pointers=3 if require_block_ptr else None,
            config_patches={"triton.prefer_nd_tiling": prefer_nd_tiling},
        )
@parametrize("prefer_nd_tiling", [False, True])
@parametrize(
"x_size,y_size",
[
((8, 8), (8, 1)),
((8, 8), (1, 8)),
(
(4, 1, 4),
(1, 4, 1),
), # Very important case: index variables are disjoint!
(
(1, 1, 1, 4),
(4, 4, 4, 4),
), # Unmatched dims for first operand.
],
)
def test_broadcast(
self, x_size: tuple[int, ...], y_size: tuple[int, ...], prefer_nd_tiling: bool
):
"""
Test that we can generate strided block pointers when inputs have different
shapes, and they are broadcast together.
"""
def foo(x, y):
a = x + 1
b = y * 2
return a + b
x, y = (
self._discontiguous_tensor(size, self.device) for size in (x_size, y_size)
)
# Check that input sizes are not the same
self.assertNotEqual(x.shape, y.shape)
# Check that at least one dimension is a singleton
all_dims = x.shape + y.shape
self.assertIn(1, all_dims)
# Expect 3 block pointers: 2 inputs one output
self._run_and_compare(
foo,
x,
y,
expected_num_block_pointers=3,
config_patches={"triton.prefer_nd_tiling": prefer_nd_tiling},
)
    def test_broadcast_with_singleton_dims(self):
        """Broadcast descriptors must skip size-1 dims that also have stride 0."""
        # This tests the case when the input / output contains both zero strides
        # and singleton dimensions. In this case the broadcasting dimensions
        # generated for the descriptor need to ignore dimensions that have zero
        # strides with size 1
        # This is a minified repro based on HuggingFaceTB/SmolLM2-135M
        # original issue:
        # store index=x2 + 192*y0 + 64*y1
        # matched block params = BlockParameters(
        #   shape=[3, 4, 1, 1, 64],
        #   block_shape=[((YBLOCK + 3)//4), Min(4, YBLOCK), 1, 1, XBLOCK],
        #   strides=[64, 192, 0, 0, 1],
        #   offsets=[(yoffset//4), ModularIndexing(yoffset, 1, 4), 0, 0, xoffset]
        # )
        # broadcasting_dims=[False, False, True, True, False]
        # broadcast_shape=[((YBLOCK + 3)//4), Min(4, YBLOCK), XBLOCK]
        # error, len(broadcasting_dims) != broadcast_shape
        def forward(expand_4, permute_4, mul_7):
            # Serialized FX graph from the minifier: each intermediate is set
            # to None once consumed, mirroring the generated repro script.
            clone = torch.ops.aten.clone.default(
                expand_4, memory_format=torch.contiguous_format
            )
            expand_4 = None
            view_4 = torch.ops.aten.view.default(clone, [1, 4, 64])
            clone = None
            cos = torch.ops.aten.cos.default(view_4)
            view_4 = None
            mul = torch.ops.aten.mul.Tensor(cos, 1.0)
            cos = None
            unsqueeze_4 = torch.ops.aten.unsqueeze.default(mul, 1)
            mul = None
            mul_6 = torch.ops.aten.mul.Tensor(permute_4, unsqueeze_4)
            permute_4 = unsqueeze_4 = None
            add_3 = torch.ops.aten.add.Tensor(mul_6, mul_7)
            mul_6 = mul_7 = None
            unsqueeze_6 = torch.ops.aten.unsqueeze.default(add_3, 2)
            add_3 = None
            return (unsqueeze_6,)

        def load_args(reader):
            # Recreates the original inputs; note the zero-stride dimension in
            # expand_4's strides (128, 1, 0, 4), which triggered the bug.
            buf0 = reader.storage(storage_hash=None, nbytes=512, device=self.device)
            reader.tensor(buf0, (1, 4, 2, 32), (128, 1, 0, 4), is_leaf=True)  # expand_4
            buf1 = reader.storage(storage_hash=None, nbytes=3072, device=self.device)
            reader.tensor(
                buf1, (1, 3, 4, 64), (768, 64, 192, 1), is_leaf=True
            )  # permute_4
            buf2 = reader.storage(storage_hash=None, nbytes=3072, device=self.device)
            reader.tensor(buf2, (1, 3, 4, 64), is_leaf=True)  # mul_7

        load_args._version = 0
        input_reader = InputReader()
        load_args(input_reader)
        args = input_reader.args

        # XPU needs slightly looser tolerances for this graph.
        if self.device == "xpu":
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = None
            rtol = None
        self._run_and_compare(
            forward,
            *args,
            expected_num_block_pointers=4,
            atol=atol,
            rtol=rtol,
        )
@parametrize(
"x_size,y_size",
[
((32, 1), (32, 32)),
((1, 8), (8, 8)),
# ((4, 1, 3), (4, 5, 3)), # TODO: T207754224
((4, 1, 3), (4, 4, 3)),
((1, 5, 5), (5, 5, 5)),
((5, 5, 1), (5, 5, 5)),
((5, 1, 1), (5, 5, 5)),
((1, 1, 5), (5, 5, 5)),
((1, 5, 1), (5, 5, 5)),
((7, 1, 1, 4), (7, 3, 4, 4)),
((5, 6, 1, 1), (5, 6, 4, 3)),
],
)
def test_expand_broadcast(self, x_size: tuple[int, ...], y_size: tuple[int, ...]):
"""
When the load and store have different shapes, we should use broadcast.
"""
def foo(x, y_size):
return x.expand(y_size).clone()
def get_input(size: tuple[int, ...]) -> torch.Tensor:
device = torch.device(self.device)
full = torch.randn(size).to(device)
view = torch.as_strided(full, size, full.stride())
return view
x = get_input(x_size)
y = y_size
# Check that input sizes are not the same
self.assertNotEqual(x_size, y_size)
# Check that is valid broadcast
self.assertEqual(len(x_size), len(y_size))
for i, j in zip(x_size, y_size):
if i != 1:
self.assertEqual(i, j)
result, (triton_code,) = self._run_and_compare(foo, x, y)
    @xfail_if_use_tensor_descriptor
    @parametrize("prefer_nd_tiling", [False, True])
    @config.patch("triton.skip_l1_cache", False)
    def test_pointwise_broadcast_nonzero_strides(self, prefer_nd_tiling: bool):
        """
        Test that we emit tl.broadcast_to instead of using strides of 0.
        """
        full_shape = (8, 8)
        col_shape = (full_shape[1], 1)
        device = torch.device(self.device)
        full = torch.randn(full_shape).to(device)
        # Column view aliasing `full`: shape (8, 1) but `full`'s strides.
        col = torch.as_strided(full, col_shape, full.stride())

        # Expect 3 block pointers: 2 inputs one output
        result, (triton_code,) = self._run_and_compare(
            torch.add,
            full,
            col,
            expected_num_block_pointers=3,
            config_patches={
                "triton.prefer_nd_tiling": prefer_nd_tiling,
            },
        )

        # Check the code for broadcasts.
        # We shouldn't see any strides of 0.
        load_lines, store_lines = tuple(
            self._get_lines_containing_substr(triton_code, substr)
            for substr in ("tl.load", "tl.store")
        )
        # The expected codegen differs by tiling mode: ND tiling broadcasts via
        # a trailing [:, None]; 1D tiling needs an explicit tl.broadcast_to.
        if prefer_nd_tiling:
            self.assertExpectedInline(
                load_lines,
                """\
    tmp0 = tl.load(tl.make_block_ptr(in_ptr0, shape=[8, 8], strides=[8, 1], block_shape=[YBLOCK, XBLOCK], order=[1, 0], offsets=[yoffset, xoffset]), boundary_check=[0, 1])
    tmp1 = tl.load(tl.make_block_ptr(in_ptr1, shape=[8], strides=[8], block_shape=[YBLOCK], order=[0], offsets=[yoffset]), boundary_check=[0], eviction_policy='evict_last')[:, None]""",  # noqa: B950
            )
            self.assertExpectedInline(
                store_lines,
                """    tl.store(tl.make_block_ptr(out_ptr0, shape=[8, 8], strides=[8, 1], block_shape=[YBLOCK, XBLOCK], order=[1, 0], offsets=[yoffset, xoffset]), tl.broadcast_to(tmp2, [YBLOCK, XBLOCK]).to(tl.float32), boundary_check=[0, 1])""",  # noqa: B950
            )
        else:
            self.assertExpectedInline(
                load_lines,
                """\
    tmp0 = tl.load(tl.make_block_ptr(in_ptr0, shape=[64], strides=[1], block_shape=[XBLOCK], order=[0], offsets=[xoffset]), boundary_check=[0])
    tmp1 = tl.reshape(tl.broadcast_to(tl.load(tl.make_block_ptr(in_ptr1, shape=[8], strides=[8], block_shape=[(7 + XBLOCK) // 8], order=[0], offsets=[xoffset // 8]), boundary_check=[0], eviction_policy='evict_last')[:, None, None], [(7 + XBLOCK) // 8, ((1) * ((1) <= ((7 + XBLOCK) // 8)) + ((7 + XBLOCK) // 8) * (((7 + XBLOCK) // 8) < (1))), ((8) * ((8) <= (XBLOCK)) + (XBLOCK) * ((XBLOCK) < (8)))]), [XBLOCK])""",  # noqa: B950
            )
            self.assertExpectedInline(
                store_lines,
                """    tl.store(tl.make_block_ptr(out_ptr0, shape=[64], strides=[1], block_shape=[XBLOCK], order=[0], offsets=[xoffset]), tl.broadcast_to(tmp2, [XBLOCK]).to(tl.float32), boundary_check=[0])""",  # noqa: B950
            )
    @parametrize("prefer_nd_tiling", [False, True])
    @parametrize(
        "view_size,num_block_pointers,num_triton_kernels",
        [
            ((4, 4), 1, 1),
            ((4, 4, 4), 1, 1),
            ((8, 8, 8), 1, 1),
            ((15, 15), None, 1),  # Non-power of 2
            # Multiple of max block. Uses loops.
            subtest(
                arg_values=((3 * max_block, 2), 3, 2),
                decorators=[
                    test_torchinductor.skip_if_triton_cpu("Triton CPU: slow test")
                ],
            ),
            (
                (2, 3 * max_block),
                2,
                2,
            ),  # Multiple of max block. Uses loops.
            ((128, 128), 3, 2),  # Test a large size, with loops.
        ],
    )
    def test_reduction(
        self,
        view_size: tuple[int, ...],
        num_block_pointers: int,
        num_triton_kernels: int,
        prefer_nd_tiling: bool,
    ):
        """
        Tests a reduction kernel.
        """
        # One specific (slow) combination is skipped on CPU.
        if self.device == "cpu" and all(
            # Multiple of max block. Uses loops.
            [
                view_size == (3 * max_block, 2),
                num_block_pointers == 3,
                num_triton_kernels == 2,
                prefer_nd_tiling is False,
            ]
        ):
            raise unittest.SkipTest(
                "Long test and raises BrokenProcessPool Error if triton CPU"
            )

        # NOTE(review): `device` is unused below — `_discontiguous_tensor`
        # takes the device string directly.
        device = torch.device(self.device)
        view = self._discontiguous_tensor(view_size, self.device)

        if num_triton_kernels == 2 and config.triton.cooperative_reductions:
            # fewer kernels with cooperative reductions
            num_triton_kernels = 1
            num_block_pointers -= 2

        # Expect at least 1 block pointer for the input.
        # Add 2 more if we generate 2 kernels.
        result, (code,) = self._run_and_compare(
            torch.sum,
            view,
            expected_num_block_pointers=num_block_pointers,
            expected_num_triton_kernels=num_triton_kernels,
            config_patches={"triton.prefer_nd_tiling": prefer_nd_tiling},
        )
@parametrize(
"view_size,num_block_pointers,num_triton_kernels",
[
((8, 8), 2, 1), # No loops. Should be supported.
(
(128, 128),
None,
None,
), # Looped reduction. Block pointers not yet supported.
],
)
def test_mixed_pointwise_reduction(
self,
view_size: tuple[int, ...],
num_block_pointers: int,
num_triton_kernels: int,
):
"""
Tests mixing pointwise with reduction ops.
"""
def foo(x, y):
return torch.sum(x + y)
inputs = [
self._discontiguous_tensor(view_size, self.device) for input_idx in range(2)
]
# Expect 2 block pointers: inputs
result, (code,) = self._run_and_compare(
foo,
*inputs,
expected_num_block_pointers=num_block_pointers,
expected_num_triton_kernels=num_triton_kernels,
)
@xfail_if_use_tensor_descriptor
def test_multiple_max_block_non_power_of_2(self):
"""
Check that we support dims of size n * MAX_BLOCK, where n is any positive integer, not
necessarily a power of 2.
"""
def foo(x):
return x - 1
device = torch.device(self.device)
full_size = (3 * max_block, 3)
view_size = (3 * max_block, 2)
full = torch.randn(full_size).to(device)
view = torch.as_strided(full, view_size, full.stride())
# Check that we're using dims that aren't all powers of 2
have_np2_dim = not all(is_power_of_2(dim) for dim in view_size)
self.assertTrue(have_np2_dim)
# Check that we need more than one stride to represent the tensor
nontrivial_dims = [dim for dim in view_size if dim > 1]
self.assertTrue(len(nontrivial_dims) > 1)
# Expect 2 block pointers: input and output
self._run_and_compare(foo, view, expected_num_block_pointers=2)
@parametrize(
"nd_tiling,num_block_pointers",
[
subtest(
(True, 2), decorators=[xfail_if_use_tensor_descriptor]
), # With tiling, the index is affine.
(False, 1), # We can't infer that the load is a power of 2.
],
)
def test_dynamic_shapes_pointwise(self, nd_tiling: bool, num_block_pointers: int):
"""
Test a pointwise kernel with dynamic shapes.
"""
view_size = (4, 4)
view = self._discontiguous_tensor(view_size, self.device)
self._run_and_compare(
torch.div,
view,
view,
expected_num_block_pointers=num_block_pointers,
config_patches={"triton.prefer_nd_tiling": nd_tiling},
compile_kwargs={"dynamic": True},
)
@parametrize(
"with_tiling,num_block_pointers",
[
subtest(
(True, 1), decorators=[xfail_if_use_tensor_descriptor]
), # With tiling, the index is affine.
(False, 0), # We can't infer that the load is a power of 2.
],
)
def test_dynamic_shapes_reduction(self, with_tiling: bool, num_block_pointers: int):
"""
Test a reduction kernel with dynamic shapes.
"""
view_size = (4, 4)
view = self._discontiguous_tensor(view_size, self.device)
self._run_and_compare(
torch.prod,
view,
expected_num_block_pointers=num_block_pointers,
config_patches={
"triton.prefer_nd_tiling": with_tiling,
"triton.tile_reductions": with_tiling,
},
compile_kwargs={"dynamic": True},
)
@unittest.skip(reason="Dynamo tracing error")
def test_dynamic_shapes_pointwise_multiple_max_block(self):
"""
Test dynamic shapes, where we know the shape is a multiple of the max block
size. We should be able to generate a block pointer for this case.
"""
def foo(x):
tile_dims = (3 * max_block * x.shape[0], 3 * x.shape[1])
view_size = (3 * max_block * x.shape[0], 2 * x.shape[1])
full = x.tile(tile_dims)
view = torch.as_strided(full, view_size, full.stride())
return view + view
device = torch.device(self.device)
x_size = (1, 1)
x = torch.randn(x_size).to(device)
# Expect 2 block pointers: input and output
self._run_and_compare(
x, compile_kwargs={"dynamic": True}, expected_num_block_pointers=2
)
    @decorateIf(
        xfail_if_use_tensor_descriptor,
        lambda param_kwargs: not (
            param_kwargs["num_block_pointers"] == 3 and param_kwargs["num_tiles"] == 1
        ),
    )
    @parametrize(
        "full_size,view_size,num_block_pointers,num_tiles",
        [
            (
                (32, 32),
                (16, 32),
                3,
                1,
            ),  # Contiguous 2D tensor. Does not require tiling.
            ((5, 9), (3, 7), 3, 2),  # 2D tensor with 1 discontiguous dim.
            ((11, 13, 7), (9, 13, 5), 3, 2),  # 3D tensor with 1 discontiguous dim (2).
            subtest(
                arg_values=(
                    (3, 11, 13, 7),
                    (2, 9, 13, 7),
                    3,
                    2,
                ),
                decorators=[
                    test_torchinductor.skip_if_triton_cpu("Triton CPU: slow test")
                ],
            ),  # 4D tensor with 1 discontiguous dim (1).
            (
                (3, 11, 13, 7),
                (2, 11, 9, 7),
                3,
                2,
            ),
            # 4D tensor with 1 discontiguous dim (2).
            (
                (5, 5, 5, 5, 5),
                (3, 3, 5, 3, 5),
                1,
                2,
            ),  # 5D tensor with 2 discontiguous dims (3, 1). Block pointers unexpected.
        ],
    )
    def test_nd_tiling_odd_shapes_pointwise(
        self,
        full_size: tuple[int, ...],
        view_size: tuple[int, ...],
        num_block_pointers: int,
        num_tiles: int,
    ):
        """
        Test odd shapes with ND tiling enabled.
        Uses a pointwise op.
        """

        def get_input() -> torch.Tensor:
            # A strided view over a larger tensor (discontiguous when
            # view_size differs from full_size in trailing dims).
            device = torch.device(self.device)
            full = torch.randn(full_size).to(device)
            return torch.as_strided(full, view_size, full.stride())

        args = [get_input() for arg_idx in range(2)]

        # Expect up to 3 block pointers: 2 inputs 1 output.
        result, code = self._run_and_compare(
            torch.add,
            *args,
            expected_num_block_pointers=num_block_pointers,
            config_patches={
                "triton.prefer_nd_tiling": True,
            },
        )

        # Check the code for the expected tiling: exactly the first
        # `num_tiles` of (X, Y, Z) block symbols should appear in each kernel.
        all_tiles = ("XBLOCK", "YBLOCK", "ZBLOCK")
        expected_tiles = set(all_tiles[:num_tiles])
        for tile_name in all_tiles:
            for program in code:
                if tile_name in expected_tiles:
                    self.assertIn(tile_name, program)
                else:
                    self.assertNotIn(tile_name, program)
    @xfail_if_use_tensor_descriptor
    @parametrize(
        "view_size,num_block_pointers,num_triton_kernels,reduction_op",
        [
            ((15, 15), 1, 1, torch.sum),  # Non-power-of 2 shapes.
            ((129, 129), 3, 2, torch.sum),  # Large size, with loops.
            ((3, 3), 1, 1, torch.argmax),
            ((129, 129), 1, 1, torch.argmax),
            ((5, 5), 1, 1, torch.var_mean),  # Reduction + pointwise fusion.
        ],
    )
    def test_2d_reduction_odd_shapes(
        self,
        view_size: tuple[int, ...],
        num_block_pointers: int,
        num_triton_kernels: int,
        reduction_op: Callable,
    ):
        """
        Tests 2D reduction kernels. These arise from "odd" shapes which are not
        expressible with a 1D block pointer.
        """
        view = self._discontiguous_tensor(view_size, self.device)

        # Expect at least 1 block pointer for the input.
        # Add 2 more if we generate 2 kernels.
        result, (code,) = self._run_and_compare(
            reduction_op,
            view,
            expected_num_block_pointers=num_block_pointers,
            expected_num_triton_kernels=num_triton_kernels,
            config_patches=tiled_reduction_config,
        )

        # Check the code for multiple Rn_BLOCK's
        self._assert_reduction_ndims(code, 2)
    @parametrize(
        "size,expected_num_block_pointers,expected_num_triton_kernels,expect_fallback",
        [
            ((8, 8), 1, 1, True),  # Persistent Welford fallback
            subtest(
                ((128, 128), 7, 2, False), decorators=[xfail_if_use_tensor_descriptor]
            ),  # Looped Welford reduction
        ],
    )
    def test_2d_welford_reduction(
        self,
        size: tuple[int, ...],
        expected_num_block_pointers: int,
        expected_num_triton_kernels: int,
        expect_fallback: bool,
    ):
        """
        Tests a 2D welford reduction.

        NB: the input size should be "nice" in the sense that it's a multiple of the
        number of processors. Otherwise, we will get more complex indexing that
        doesn't generate a block pointer. Since tiling welford reductions depends on
        the block pointer analysis, those cases would fall back to 1D.
        """
        view = self._discontiguous_tensor(size, self.device)

        # We expect many block pointers for this one.
        result, (code,) = self._run_and_compare(
            torch.var_mean,
            view,
            expected_num_block_pointers=expected_num_block_pointers,
            expected_num_triton_kernels=expected_num_triton_kernels,
            config_patches=tiled_reduction_config,
        )

        # Check for a Welford reduction: present unless the persistent
        # fallback kicked in for the small size.
        self.assertEqual("welford" in code, not expect_fallback)

        # Check for 2 reduction dimensions.
        self._assert_reduction_ndims(code, 2)
@test_torchinductor.skip_if_triton_cpu("Triton CPU: slow test")
def test_welford_non_block_pointer(
self,
):
"""
Tests a welford reduction where block pointer analysis fails.
The main loop will be a 1D reduction, instead of 2D.
"""
# Use a "bad" size that's not evenly divisible by the launch grid.
# This won't decompose into a block pointer.
view = self._discontiguous_tensor((259, 311), self.device)
# We expect many block pointers for this one.
result, (code,) = self._run_and_compare(
torch.var_mean,
view,
expected_num_block_pointers=6,
expected_num_triton_kernels=2,
config_patches={"triton.prefer_nd_tiling": True},
)
# Check for a Welford reduction.
self.assertIn("welford", code)
# Check for a single reduction dimension.
self._assert_reduction_ndims(code, 1)
    def test_reduction_multiple_discontiguous_dims(self):
        """
        Test reducing a tensor with more than one discontiguous dimension. This case
        won't generate a block pointer, since we don't allow enough tiling dimensions.
        """
        # Use odd shapes to frustrate block pointer analysis.
        view = self._discontiguous_tensor((3, 7, 11), self.device)

        result, (code,) = self._run_and_compare(
            torch.sum,
            view,
            expected_num_block_pointers=0,
            expected_num_triton_kernels=1,
            config_patches=tiled_reduction_config,
        )

        # Check for 2 reduction dimensions.
        self._assert_reduction_ndims(code, 2)
    @skipIfXpu(
        msg="AssertionError: Scalars are not equal!, "
        "https://github.com/intel/torch-xpu-ops/issues/2332"
    )
    @xfail_if_use_tensor_descriptor  # Cannot use TMA API for store with no x dimension.
    @test_torchinductor.skip_if_triton_cpu  # Illegal instruction File; cannot xfail because it crashes process
    def test_2d_reduction_multi_kernel(self):
        """
        Test a 2D reduction in multi kernel mode.
        """
        view = self._discontiguous_tensor((2, 4, 1024), self.device)

        def foo(x):
            """
            Reshape to 2D and take the softmax of all trailing dims.
            """
            x = x.reshape(x.shape[0], -1)
            return torch.softmax(x, -1)

        result, (code,) = self._run_and_compare(
            foo,
            view,
            expected_num_block_pointers=5,
            expected_num_triton_kernels=2,
            config_patches={
                "triton.multi_kernel": True,
                **tiled_reduction_config,
            },
        )

        # Check for multi kernel mode.
        self.assertIn("multi_kernel", code)

        # Check for 2 reduction dimensions.
        self._assert_reduction_ndims(code, 2)
@xfail_if_use_tensor_descriptor
def test_fused_2d_reduction(
self,
):
"""
Tests fusing multiple reductions on the same input, with 2D tiling.
"""
def foo(x):
return torch.sum(x) + torch.argmax(x)
view_size = (5, 7)
view = self._discontiguous_tensor(view_size, self.device)
# Expect at least 1 block pointer for the input.
result, (code,) = self._run_and_compare(
foo,
view,
expected_num_block_pointers=1,
expected_num_triton_kernels=1,
config_patches=tiled_reduction_config,
)
# Check the code for multiple Rn_BLOCK's
self._assert_reduction_ndims(code, 2)
    @parametrize("reduction_op", [torch.sum, torch.argmax])
    def test_2d_reductions_mixed_indexing(
        self,
        reduction_op: Callable,
    ):
        """
        Tests a program with multiple reductions using different strides.
        These might not be fused.
        """

        def foo(*args):
            return sum(reduction_op(arg) for arg in args)

        view_size = (5, 7)
        arg0 = self._discontiguous_tensor(view_size, self.device)
        # NOTE(review): arg1 is uninitialized and allocated on the default
        # device rather than self.device — presumably intentional since the
        # same inputs feed both eager and compiled runs; confirm on GPU CI.
        arg1 = torch.empty(view_size)

        # No guarantees on the number of kernels or pointers.
        result, (code,) = self._run_and_compare(
            foo,
            arg0,
            arg1,
            config_patches=tiled_reduction_config,
        )

        # Check the code for multiple Rn_BLOCK's
        self._assert_reduction_ndims(code, 2)
@parametrize(
"tile_reductions",
[False, subtest(True, decorators=[xfail_if_use_tensor_descriptor])],
)
def test_enable_tiled_reductions(self, tile_reductions: bool):
"""
Tests enabling and disabling tiled reductions.
"""
view = self._discontiguous_tensor((9, 11), self.device)
# If tiled, we expect 1 block pointer for the input.
result, (code,) = self._run_and_compare(
torch.sum,
view,
expected_num_block_pointers=1 if tile_reductions else 0,
expected_num_triton_kernels=1,
config_patches={
"triton.prefer_nd_tiling": True,
"triton.tile_reductions": tile_reductions,
},
)
# Check the code for multiple Rn_BLOCK's
self._assert_reduction_ndims(code, 2 if tile_reductions else 1)
@xfail_if_use_tensor_descriptor
def test_complex_reshape_block_ptr(self):
def func(x, y):
add_ = x + y
reshape_0 = add_.reshape([8, 16, 128])
permute_0 = reshape_0.permute([0, 2, 1])
reshape_1 = permute_0.reshape([1024, 16])
clone_0 = reshape_1.clone(memory_format=torch.contiguous_format)
permute_1 = clone_0.permute([1, 0])
clone_1 = permute_1.clone(memory_format=torch.contiguous_format)
return clone_0, clone_1
inps = (torch.rand((8, 2048), device=self.device, dtype=torch.float32),) * 2
result, code = self._run_and_compare(
func,
*inps,
expected_num_triton_kernels=2,
expected_num_block_pointers=4,
)
self.assertTrue("Min" not in code[0])
@xfail_if_use_tensor_descriptor
@requires_gpu() # FIXME this test failed on Triton-CPU
def test_3d_permute_tiling(self):
"""
Test 3D tiling with permute.
"""
def foo(x, y, z):
dims = [0, 2, 1]
a = x.permute(dims=dims) + y
b = (z + y).permute(dims=dims)
return a + b
inps = (torch.rand((51, 51, 51), device=self.device, dtype=torch.float32),) * 3
result, (code,) = self._run_and_compare(
foo,
*inps,
expected_num_triton_kernels=1,
expected_num_block_pointers=3,
config_patches={
"triton.max_tiles": 3,
"triton.prefer_nd_tiling": True,
},
)
# Check for 3D tiling
self._assert_pointwise_ndims(code, 3)
    @torch._dynamo.config.patch({"capture_scalar_outputs": True})
    @parametrize("num_tile_candidates", (1, 2))
    def test_unbacked_size_on_non_contig_dim(self, num_tile_candidates: int):
        """Unbacked symint in a non-contiguous dim must not break tiling."""
        # NUM_REPEAT should determine # of candidate_tilings.
        NUM_REPEAT = 2 if num_tile_candidates == 2 else 8

        def foo(x, length):
            # .item() introduces an unbacked symint (capture_scalar_outputs).
            unbacked = length.item()
            repeated = x.repeat(1, unbacked, NUM_REPEAT)
            # permute creates split in middle with unbacked symint is the first range
            # ranges: [33*unbacked, NUM_REPEAT, 64]
            permute120 = repeated.permute([1, 2, 0])
            return permute120.cos()

        inps = (
            torch.rand((64, 33, 1), device=self.device, dtype=torch.float32),
            torch.scalar_tensor(16, device=self.device, dtype=torch.int32),
        )

        with torch._dynamo.config.patch({"capture_scalar_outputs": True}):
            self._run_and_compare(
                foo,
                *inps,
                expected_num_triton_kernels=1,
                expected_num_block_pointers=0,
                config_patches={
                    "triton.max_tiles": 3,
                    "triton.prefer_nd_tiling": True,
                },
            )
    # block_ptr advancements should also be deferred, conditional
    # on the associated buffer not being removed
    # in this case the bernoulli operation is fused with the following sum
    # so an output buffer is not needed to store the immediate result of the
    # bernoulli operation
    # TODO: fails for triton CPU "Failed to convert to LLVM IR"
    @test_torchinductor.xfail_if_triton_cpu
    # Disable split_reductions on this test for now due to the interaction with LOAF
    @config.patch(split_reductions=False)
    def test_removed_buffers(self):
        """Fused bernoulli+sum: block-pointer advances must survive buffer removal."""
        from torch.ops import aten

        def fn(a):
            # Mean of bernoulli draws; expected value is the probability p.
            return aten.bernoulli(a).sum() / torch.prod(torch.tensor(a.size()))

        p = 0.3
        # Loose tolerances because the result is a stochastic estimate of p.
        result, code = self._run_and_compare(
            fn,
            *[torch.ones(200, 200, device=self.device) * p],
            expected_num_triton_kernels=1,
            expected_num_block_pointers=1,
            atol=p * 0.06,
            rtol=0.06,
        )
    @xfail_if_use_tensor_descriptor
    def test_pointwise_index_order(self):
        """
        Test the order of indices in pointwise kernels. Expect Z to be the leading dim,
        then Y, then X.
        """
        inps = [
            self._discontiguous_tensor((5, 5, 5), device=self.device) for _ in range(2)
        ]

        result, (triton_code,) = self._run_and_compare(
            torch.add,
            *inps,
            expected_num_triton_kernels=1,
            expected_num_block_pointers=3,
            config_patches={
                "triton.max_tiles": 3,
                "triton.prefer_nd_tiling": True,
            },
        )

        # Check the load and store for block pointer strides.
        load_lines, store_lines, index_lines = tuple(
            self._get_lines_containing_substr(triton_code, substr)
            for substr in ("tl.load", "tl.store", "index =")
        )
        self.assertExpectedInline(
            load_lines,
            """\
    tmp0 = tl.load(tl.make_block_ptr(in_ptr0, shape=[5, 5, 5], strides=[100, 10, 1], block_shape=[ZBLOCK, YBLOCK, XBLOCK], order=[2, 1, 0], offsets=[zoffset, yoffset, xoffset]), boundary_check=[0, 1, 2])
    tmp1 = tl.load(tl.make_block_ptr(in_ptr1, shape=[5, 5, 5], strides=[100, 10, 1], block_shape=[ZBLOCK, YBLOCK, XBLOCK], order=[2, 1, 0], offsets=[zoffset, yoffset, xoffset]), boundary_check=[0, 1, 2])""",  # noqa: B950
        )

        self.assertExpectedInline(
            store_lines,
            """    tl.store(tl.make_block_ptr(out_ptr0, shape=[5, 5, 5], strides=[25, 5, 1], block_shape=[ZBLOCK, YBLOCK, XBLOCK], order=[2, 1, 0], offsets=[zoffset, yoffset, xoffset]), tl.broadcast_to(tmp2, [ZBLOCK, YBLOCK, XBLOCK]).to(tl.float32), boundary_check=[0, 1, 2])""",  # noqa: B950
        )

        # Check the indices. These are used for non-block pointers.
        self.assertExpectedInline(
            index_lines,
            """\
    zindex = zoffset + tl.arange(0, ZBLOCK)[:, None, None]
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :, None]
    xindex = xoffset + tl.arange(0, XBLOCK)[None, None, :]""",  # noqa: B950
        )
def test_expand_clone_broadcast(self):
"""
Test expand followed by clone. This uses an explicit Triton broadcast.
"""
base_size = (1, 32)
expanded_size = (32, 32)
def foo(x):
return x.expand(*expanded_size).clone()
inps = [torch.randn(base_size, device=self.device)]
result, (triton_code,) = self._run_and_compare(
foo,
*inps,
expected_num_triton_kernels=1,
expected_num_block_pointers=2,
config_patches={
"triton.max_tiles": 3,
"triton.prefer_nd_tiling": True,
},
)
# We should only need one broadcast.
num_broadcasts = triton_code.count("tl.broadcast_to")
self.assertEqual(num_broadcasts, 1)
def test_mul_broadcast_multi_output(self):
def foo(x, y, z):
a = x * y
b = 128.0
c = a * b
d = a * z
e = x * z
return a, c, d, e
inps = [
torch.randn((8, 11, 128), device=self.device),
torch.randn((128,), device=self.device),
torch.randn((8, 11, 128), device=self.device),
]
result, (triton_code,) = self._run_and_compare(
foo,
*inps,
expected_num_triton_kernels=1,
expected_num_block_pointers=7,
config_patches={
"triton.max_tiles": 3,
"triton.prefer_nd_tiling": True,
},
)
# Check that the tiling is 2D, even though we allow up to 3D.
# Singleton splits should be discarded.
self._assert_pointwise_ndims(triton_code, 2)
    # Integration test to ensure that matched dims & strides from match_mod_div_expr
    # are unsigned and signed integers respectively. This test case has the following
    # index:=(ModularIndexing(xindex, 4, 4)) + 4*(ModularIndexing(xindex, 32, 2))
    # and the match below is a candidate that is invalid:
    # match={
    #   dim_mod4_: 32, dim_mod3_: 2, stride_mod3_: 4, dim_mod2_: 1/16,
    #   dim_mod1_: 4, stride_mod1_: 1, stride_mod4_: 0, stride_mod2_: 0, stride_mod0_: 0
    # }
    # This is now fixed by ensuring that that wild symbols only match integers
    @xfail_if_use_tensor_descriptor
    @skipIfXpu(
        msg="Triton issue exposed by new driver, will be resolved after next triton update."
    )
    def test_ensure_integral_dims_and_strides(self):
        """unfold's mod/div indexing must only match integral dims/strides."""

        def model(data, *args):
            return torch.nn.functional.unfold(data, *args)

        data = torch.zeros(
            [2, 3, 5, 5], dtype=torch.float16, requires_grad=True, device=self.device
        )
        # unfold(kernel_size=2, dilation=1, padding=0, stride=1)
        args = [2, 1, 0, 1]
        self._run_and_compare(
            model,
            data,
            *args,
            expected_num_triton_kernels=2,
            expected_num_block_pointers=4,
            compile_kwargs={"fullgraph": True},
        )
# Integration test: block analysis must handle index expressions built from
# negative strides.  Flipping all dims of an (8, 8, 8) tensor yields
#     index = -256*(xindex//64) - ModularIndexing(xindex, 1, 8)
#             - 16*ModularIndexing(xindex, 8, 8) + 1911
# and block analysis should recover
#     shape=[8, 8, 8], strides=[-256, -16, -1],
#     block_shape=[((XBLOCK + 63)//64), Min(8, ((XBLOCK + 7)//8)), Min(8, XBLOCK)],
#     offsets=[(xoffset//64), ModularIndexing(xoffset, 8, 8),
#              ModularIndexing(xoffset, 1, 8)]
# with constant_offset = 1911.
@xfail_if_use_tensor_descriptor
def test_negative_strides(self):
    def flip_add(x, y):
        # Reversing every dimension slices with negative strides.
        return torch.flip(x, [0, 1, 2]) + y

    shape = (8, 8, 8)
    lhs = self._discontiguous_tensor(shape, device=self.device)
    rhs = self._discontiguous_tensor(shape, device=self.device)
    self._run_and_compare(
        flip_add,
        lhs,
        rhs,
        expected_num_triton_kernels=1,
        expected_num_block_pointers=3,
    )
@config.patch("triton.prefer_nd_tiling", True)
@config.patch("triton.max_tiles", 3)
@parametrize(
    "block_multiple, ynumel_exceed_ygrid_size, include_z",
    [
        # No boundary check in all dimensions
        [True, False, True],
        # No xdim boundary check, ydim is checked since > max_ygrid
        # z dim can be used since its not included
        [True, True, False],
        # Boundary check in all dimensions
        # skip triton_cpu very slow test > 1000s
        subtest(
            [False, False, True], decorators=[test_torchinductor.skip_if_triton_cpu]
        ),
    ],
)
@xfail_if_use_tensor_descriptor
def test_boundary_check(self, block_multiple, ynumel_exceed_ygrid_size, include_z):
    """
    Verify which dimensions of the generated block pointers carry a
    ``boundary_check`` clause.

    When every dim is a multiple of the (fixed) block size, no boundary
    check is needed — unless the y numel overflows the maximum y grid, in
    which case only the y dim must be checked.  When dims are NOT block
    multiples, every dim must be checked.
    """

    # Helper describing the input shape; z is optional (2D vs 3D case).
    @dataclasses.dataclass
    class InputShape:
        x: int
        y: int
        z: Optional[int] = None

        def to_list(self):
            # Tensor layout is (z?, y, x): y before x, z prepended if set.
            out = [self.y, self.x]
            if self.z is not None:
                out.insert(0, self.z)
            return out

    BLOCK_SIZE = 8
    # Non-multiple dim sizes (BLOCK_SIZE + 1) force boundary checks.
    DIM_SIZE = BLOCK_SIZE if block_multiple else BLOCK_SIZE + 1
    shape = InputShape(DIM_SIZE, DIM_SIZE, DIM_SIZE if include_z else None)
    if ynumel_exceed_ygrid_size:
        # Inflate y past the max y grid so the kernel must fold the
        # y index across program ids (and boundary-check y).
        shape.y = math.ceil(get_max_y_grid()) * shape.y + shape.y

    # Use fixed block sizes to avoid having to generate very large input tensors
    class FixedBlockSizeChoices(InductorChoices):
        def triton_kernel_kwargs(self, kernel_cls, features, groups, kernel_kwargs):
            # Pin {X,Y,Z}BLOCK to BLOCK_SIZE for every dim present in `shape`.
            block_sizes = {
                f"{prefix.upper()}BLOCK": BLOCK_SIZE
                for prefix, size in dataclasses.asdict(shape).items()
                if size is not None
            }
            kernel_kwargs["fixed_config"] = FixedTritonConfig(block_sizes)
            return kernel_kwargs

    a = self._discontiguous_tensor(shape.to_list(), device=self.device)
    # b broadcasts against a along the last (x) dimension.
    b_shape = shape.to_list()
    b_shape[-1] = 1
    b = self._discontiguous_tensor(b_shape, device=self.device)

    def func(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return a + b

    with V.set_choices_handler(FixedBlockSizeChoices()):
        result, code = self._run_and_compare(
            func,
            a,
            b,
            expected_num_triton_kernels=1,
            expected_num_block_pointers=3,
        )
    code = code[0]

    if block_multiple:
        if ynumel_exceed_ygrid_size:
            self.assertIn(
                "yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK",
                code,
            )
            # Only the y dimension should be boundary checked
            # a, b, and output
            self.assertEqual(code.count("boundary_check=[0]"), 3)
        else:
            # No boundary checking
            self.assertNotIn("boundary_check", code)
    else:
        # Loading a
        self.assertTrue("boundary_check=[0, 1, 2]" in code)
        # Loading b
        self.assertTrue("boundary_check=[0, 1]" in code)
@unittest.skipIf(not TRITON_HAS_CPU, "requires triton CPU backend")
@config.patch(cpu_backend="triton")
@config.patch("triton.use_block_ptr", True)
| CommonTemplate |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1568890,
"end": 1570276
} | class ____(TextDef):
"""
ValueDefWithConditionStringFieldDefText schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalStringFieldDef`, :class:`ConditionalValueDefTextExprRef`, :class:`ConditionalParameterStringFieldDef`, :class:`ConditionalPredicateStringFieldDef`, :class:`ConditionalParameterValueDefTextExprRef`, :class:`ConditionalPredicateValueDefTextExprRef`, Sequence[dict, :class:`ConditionalValueDefTextExprRef`, :class:`ConditionalParameterValueDefTextExprRef`, :class:`ConditionalPredicateValueDefTextExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {"$ref": "#/definitions/ValueDefWithCondition<StringFieldDef,Text>"}
def __init__(
self,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
**kwds,
):
super().__init__(condition=condition, value=value, **kwds)
| ValueDefWithConditionStringFieldDefText |
python | getsentry__sentry | src/sentry/types/grouphash_metadata.py | {
"start": 4868,
"end": 5919
} | class ____(TypedDict):
"""
Data gathered when no other grouping method produces results
"""
# Whether we landed in the fallback because of a lack of data, because we had a stacktrace but
# all frames were ignored, or some other reason
fallback_reason: str
HashingMetadata = (
FingerprintHashingMetadata
| MessageHashingMetadata
| SaltedMessageHashingMetadata
| StacktraceHashingMetadata
| SaltedStacktraceHashingMetadata
| SecurityHashingMetadata
| SaltedSecurityHashingMetadata
| TemplateHashingMetadata
| SaltedTemplateHashingMetadata
| ChecksumHashingMetadata
| FallbackHashingMetadata
)
HashingMetadataWithFingerprint = (
FingerprintHashingMetadata
| SaltedMessageHashingMetadata
| SaltedStacktraceHashingMetadata
| SaltedSecurityHashingMetadata
| SaltedTemplateHashingMetadata
)
def has_fingerprint_data(
hashing_metadata: HashingMetadata,
) -> TypeIs[HashingMetadataWithFingerprint]:
return "fingerprint" in hashing_metadata
| FallbackHashingMetadata |
python | huggingface__transformers | src/transformers/models/sam2_video/modeling_sam2_video.py | {
"start": 56112,
"end": 58225
} | class ____(nn.Module):
def __init__(self, config: Sam2VideoMaskDecoderConfig):
super().__init__()
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.layers = nn.ModuleList()
for i in range(self.num_hidden_layers):
self.layers.append(Sam2VideoTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0)))
self.final_attn_token_to_image = Sam2VideoAttention(config)
self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size)
def forward(
self,
point_embeddings: Tensor,
image_embeddings: Tensor,
image_positional_embeddings: Tensor,
attention_similarity: Tensor,
target_embedding=None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
if image_embeddings is None:
raise ValueError("You have to specify an image_embedding")
image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1)
# Prepare queries
queries = point_embeddings
keys = image_embeddings
# Apply transformer blocks and final layernorm
for layer in self.layers:
if target_embedding is not None:
queries += target_embedding
queries, keys, _ = layer(
queries=queries,
keys=keys,
query_point_embedding=point_embeddings,
key_point_embedding=image_positional_embeddings,
attention_similarity=attention_similarity,
**kwargs,
)
# Apply the final attention layer from the points to the image
query = queries + point_embeddings
key = keys + image_positional_embeddings
attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys)
queries = queries + attn_out
queries = self.layer_norm_final_attn(queries)
return queries, keys
| Sam2VideoTwoWayTransformer |
python | getsentry__sentry | src/sentry/issues/endpoints/project_user_issue.py | {
"start": 898,
"end": 1472
} | class ____:
def __init__(self, data: dict):
self.data = data
def get_issue_type(self) -> type[GroupType]:
raise NotImplementedError
def get_issue_title(self) -> str:
raise NotImplementedError
def get_issue_subtitle(self) -> str:
raise NotImplementedError
def create_fingerprint(self) -> list[str]:
raise NotImplementedError
def get_tags(self) -> dict:
raise NotImplementedError
def get_evidence(self) -> tuple[dict, list[IssueEvidence]]:
raise NotImplementedError
| BaseUserIssueFormatter |
python | huggingface__transformers | src/transformers/models/deformable_detr/image_processing_deformable_detr.py | {
"start": 29227,
"end": 67520
} | class ____(BaseImageProcessor):
r"""
Constructs a Deformable DETR image processor.
Args:
format (`str`, *optional*, defaults to `"coco_detection"`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter
in the `preprocess` method. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_annotations (`bool`, *optional*, defaults to `True`):
Controls whether to convert the annotations to the format expected by the DETR model. Converts the
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
method. If `True`, padding will be applied to the bottom and right of the image with zeros.
If `pad_size` is provided, the image will be padded to the specified dimensions.
Otherwise, the image will be padded to the maximum height and width of the batch.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = DeformableDetrImageProcessorKwargs
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
def __init__(
self,
format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_annotations: Optional[bool] = None,
do_pad: bool = True,
pad_size: Optional[dict[str, int]] = None,
**kwargs,
) -> None:
max_size = None if size is None else kwargs.pop("max_size", 1333)
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
size = get_size_dict(size, max_size=max_size, default_to_square=False)
# Backwards compatibility
if do_convert_annotations is None:
do_convert_annotations = do_normalize
super().__init__(**kwargs)
self.format = format
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.do_convert_annotations = do_convert_annotations
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_pad = kwargs.pop("pad_and_return_pixel_mask", do_pad)
self.pad_size = pad_size
self._valid_processor_keys = [
"images",
"annotations",
"return_segmentation_masks",
"masks_path",
"do_resize",
"size",
"resample",
"do_rescale",
"rescale_factor",
"do_normalize",
"do_convert_annotations",
"image_mean",
"image_std",
"do_pad",
"pad_size",
"format",
"return_tensors",
"data_format",
"input_data_format",
]
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr
def prepare_annotation(
self,
image: np.ndarray,
target: dict,
format: Optional[AnnotationFormat] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> dict:
"""
Prepare an annotation for feeding into DeformableDetr model.
"""
format = format if format is not None else self.format
if format == AnnotationFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(
image, target, return_segmentation_masks, input_data_format=input_data_format
)
elif format == AnnotationFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image,
target,
masks_path=masks_path,
return_masks=return_segmentation_masks,
input_data_format=input_data_format,
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, max_size=None, default_to_square=False)
if "shortest_edge" in size and "longest_edge" in size:
new_size = get_resize_output_image_size(
image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
)
elif "max_height" in size and "max_width" in size:
new_size = get_image_size_for_max_height_width(
image, size["max_height"], size["max_width"], input_data_format=input_data_format
)
elif "height" in size and "width" in size:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = resize(
image,
size=new_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return image
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
def resize_annotation(
self,
annotation,
orig_size,
size,
resample: PILImageResampling = PILImageResampling.NEAREST,
) -> dict:
"""
Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
to this number.
"""
return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
def rescale(
self,
image: np.ndarray,
rescale_factor: float,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
"""
Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
`[center_x, center_y, width, height]` format and from absolute to relative pixel values.
"""
return normalize_annotation(annotation, image_size=image_size)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
def _update_annotation_for_padded_image(
self,
annotation: dict,
input_image_size: tuple[int, int],
output_image_size: tuple[int, int],
padding,
update_bboxes,
) -> dict:
"""
Update the annotation for a padded image.
"""
new_annotation = {}
new_annotation["size"] = output_image_size
for key, value in annotation.items():
if key == "masks":
masks = value
masks = pad(
masks,
padding,
mode=PaddingMode.CONSTANT,
constant_values=0,
input_data_format=ChannelDimension.FIRST,
)
masks = safe_squeeze(masks, 1)
new_annotation["masks"] = masks
elif key == "boxes" and update_bboxes:
boxes = value
boxes *= np.asarray(
[
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
input_image_size[1] / output_image_size[1],
input_image_size[0] / output_image_size[0],
]
)
new_annotation["boxes"] = boxes
elif key == "size":
new_annotation["size"] = output_image_size
else:
new_annotation[key] = value
return new_annotation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
output_size: tuple[int, int],
annotation: Optional[dict[str, Any]] = None,
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
update_bboxes: bool = True,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image,
padding,
mode=PaddingMode.CONSTANT,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
if annotation is not None:
annotation = self._update_annotation_for_padded_image(
annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
)
return padded_image, annotation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
def pad(
self,
images: list[np.ndarray],
annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
update_bboxes: bool = True,
pad_size: Optional[dict[str, int]] = None,
) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
images (list[`np.ndarray`]):
Images to pad.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
Annotations to transform according to the padding that is applied to the images.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
update_bboxes (`bool`, *optional*, defaults to `True`):
Whether to update the bounding boxes in the annotations to match the padded images. If the
bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`
format, the bounding boxes will not be updated.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
pad_size = pad_size if pad_size is not None else self.pad_size
if pad_size is not None:
padded_size = (pad_size["height"], pad_size["width"])
else:
padded_size = get_max_height_width(images, input_data_format=input_data_format)
annotation_list = annotations if annotations is not None else [None] * len(images)
padded_images = []
padded_annotations = []
for image, annotation in zip(images, annotation_list):
padded_image, padded_annotation = self._pad_image(
image,
padded_size,
annotation,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
update_bboxes=update_bboxes,
)
padded_images.append(padded_image)
padded_annotations.append(padded_annotation)
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [
make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format)
for image in images
]
data["pixel_mask"] = masks
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
if annotations is not None:
encoded_inputs["labels"] = [
BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
]
return encoded_inputs
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
def preprocess(
self,
images: ImageInput,
annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample=None, # PILImageResampling
do_rescale: Optional[bool] = None,
rescale_factor: Optional[Union[int, float]] = None,
do_normalize: Optional[bool] = None,
do_convert_annotations: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
format: Optional[Union[str, AnnotationFormat]] = None,
return_tensors: Optional[Union[TensorType, str]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
pad_size: Optional[dict[str, int]] = None,
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or a batch of images so that it can be used by the model.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
List of annotations associated with the image or batch of images. If annotation is for object
detection, the annotations should be a dictionary with the following keys:
- "image_id" (`int`): The image id.
- "annotations" (`list[Dict]`): List of annotations for an image. Each annotation should be a
dictionary. An image can have no annotations, in which case the list should be empty.
If annotation is for segmentation, the annotations should be a dictionary with the following keys:
- "image_id" (`int`): The image id.
- "segments_info" (`list[Dict]`): List of segments for an image. Each segment should be a dictionary.
An image can have no segments, in which case the list should be empty.
- "file_name" (`str`): The file name of the image.
return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
Whether to return segmentation masks.
masks_path (`str` or `pathlib.Path`, *optional*):
Path to the directory containing the segmentation masks.
do_resize (`bool`, *optional*, defaults to self.do_resize):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to self.size):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
resample (`PILImageResampling`, *optional*, defaults to self.resample):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to self.do_rescale):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
Rescale factor to use when rescaling the image.
do_normalize (`bool`, *optional*, defaults to self.do_normalize):
Whether to normalize the image.
do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
Whether to convert the annotations to the format expected by the model. Converts the bounding
boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
and in relative coordinates.
image_mean (`float` or `list[float]`, *optional*, defaults to self.image_mean):
Mean to use when normalizing the image.
image_std (`float` or `list[float]`, *optional*, defaults to self.image_std):
Standard deviation to use when normalizing the image.
do_pad (`bool`, *optional*, defaults to self.do_pad):
Whether to pad the image. If `True`, padding will be applied to the bottom and right of
the image with zeros. If `pad_size` is provided, the image will be padded to the specified
dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.
format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
Format of the annotations.
return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
Type of tensors to return. If `None`, will return the list of images.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
pad_size (`dict[str, int]`, *optional*):
The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size
provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest
height and width in the batch.
"""
do_resize = self.do_resize if do_resize is None else do_resize
size = self.size if size is None else size
size = get_size_dict(size=size, default_to_square=False)
resample = self.resample if resample is None else resample
do_rescale = self.do_rescale if do_rescale is None else do_rescale
rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = self.do_normalize if do_normalize is None else do_normalize
image_mean = self.image_mean if image_mean is None else image_mean
image_std = self.image_std if image_std is None else image_std
do_convert_annotations = (
self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
)
do_pad = self.do_pad if do_pad is None else do_pad
pad_size = self.pad_size if pad_size is None else pad_size
format = self.format if format is None else format
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor.")
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
# Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
if annotations is not None and isinstance(annotations, dict):
annotations = [annotations]
if annotations is not None and len(images) != len(annotations):
raise ValueError(
f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
)
format = AnnotationFormat(format)
if annotations is not None:
validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
if (
masks_path is not None
and format == AnnotationFormat.COCO_PANOPTIC
and not isinstance(masks_path, (pathlib.Path, str))
):
raise ValueError(
"The path to the directory containing the mask PNG files should be provided as a"
f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
)
# All transformations expect numpy arrays
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
# prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
if annotations is not None:
prepared_images = []
prepared_annotations = []
for image, target in zip(images, annotations):
target = self.prepare_annotation(
image,
target,
format,
return_segmentation_masks=return_segmentation_masks,
masks_path=masks_path,
input_data_format=input_data_format,
)
prepared_images.append(image)
prepared_annotations.append(target)
images = prepared_images
annotations = prepared_annotations
del prepared_images, prepared_annotations
# transformations
if do_resize:
if annotations is not None:
resized_images, resized_annotations = [], []
for image, target in zip(images, annotations):
orig_size = get_image_size(image, input_data_format)
resized_image = self.resize(
image, size=size, resample=resample, input_data_format=input_data_format
)
resized_annotation = self.resize_annotation(
target, orig_size, get_image_size(resized_image, input_data_format)
)
resized_images.append(resized_image)
resized_annotations.append(resized_annotation)
images = resized_images
annotations = resized_annotations
del resized_images, resized_annotations
else:
images = [
self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [
self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
]
if do_convert_annotations and annotations is not None:
annotations = [
self.normalize_annotation(annotation, get_image_size(image, input_data_format))
for annotation, image in zip(annotations, images)
]
if do_pad:
# Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
encoded_inputs = self.pad(
images,
annotations=annotations,
return_pixel_mask=True,
data_format=data_format,
input_data_format=input_data_format,
update_bboxes=do_convert_annotations,
return_tensors=return_tensors,
pad_size=pad_size,
)
else:
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in images
]
encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
if annotations is not None:
encoded_inputs["labels"] = [
BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
]
return encoded_inputs
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
__all__ = ["DeformableDetrImageProcessor"]
| DeformableDetrImageProcessor |
python | django__django | tests/filtered_relation/models.py | {
"start": 172,
"end": 594
} | class ____(models.Model):
name = models.CharField(max_length=50, unique=True)
favorite_books = models.ManyToManyField(
"Book",
related_name="preferred_by_authors",
related_query_name="preferred_by_authors",
)
content_type = models.ForeignKey(ContentType, models.CASCADE, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
| Author |
python | walkccc__LeetCode | solutions/1348. Tweet Counts Per Frequency/1348.py | {
"start": 42,
"end": 770
} | class ____:
def __init__(self):
self.tweetNameToTimes = collections.defaultdict(SortedList)
def recordTweet(self, tweetName: str, time: int) -> None:
self.tweetNameToTimes[tweetName].add(time)
def getTweetCountsPerFrequency(self, freq: str, tweetName: str,
startTime: int, endTime: int) -> list[int]:
counts = []
times = self.tweetNameToTimes[tweetName]
chunk = 60 if freq == 'minute' else 3600 if freq == 'hour' else 86400
# I := startTime of each chunk
for i in range(startTime, endTime + 1, chunk):
j = min(i + chunk, endTime + 1) # EndTime of each chunk
counts.append(bisect_left(times, j) - bisect_left(times, i))
return counts
| TweetCounts |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/data_sources/test_redshift.py | {
"start": 407,
"end": 2253
} | class ____:
"""This set of tests ensures that we can run expectations against every data
type supported by Redshift.
"""
COLUMN = "col_a"
@pytest.mark.redshift
def test_geometry(self):
column_type = REDSHIFT_TYPES.GEOMETRY
batch_setup = RedshiftBatchTestSetup(
config=RedshiftDatasourceTestConfig(column_types={self.COLUMN: column_type}),
data=pd.DataFrame(
{
self.COLUMN: [
"0103000020E61000000100000005000000000000000000000000000000000000000000000000000000000000000000F03F000000000000F03F000000000000F03F000000000000F03F000000000000000000000000000000000000000000000000"
]
}
),
extra_data={},
context=get_context(mode="ephemeral"),
)
with batch_setup.batch_test_context() as batch:
result = batch.validate(
expect=ExpectColumnValuesToBeOfType(
column=self.COLUMN,
type_="GEOMETRY",
)
)
assert result.success
@pytest.mark.redshift
def test_super(self):
column_type = REDSHIFT_TYPES.SUPER
batch_setup = RedshiftBatchTestSetup(
config=RedshiftDatasourceTestConfig(column_types={self.COLUMN: column_type}),
data=pd.DataFrame({self.COLUMN: ['{ "type": "Point", "coordinates": [1.0, 2.0] }']}),
extra_data={},
context=get_context(mode="ephemeral"),
)
with batch_setup.batch_test_context() as batch:
result = batch.validate(
expect=ExpectColumnValuesToBeOfType(
column=self.COLUMN,
type_="SUPER",
)
)
assert result.success
| TestRedshiftDataTypes |
python | getsentry__sentry | tests/sentry/api/test_paginator.py | {
"start": 36469,
"end": 38417
} | class ____:
cls = EAPPageTokenPaginator
def test_first_page_empty(self) -> None:
def data_fn(limit, page_token):
return {
"data": [],
"page_token": PageToken(end_pagination=True),
}
paginator = self.cls(data_fn=data_fn)
page = paginator.get_result(limit=3, cursor=None)
assert page["data"] == []
assert page.prev.has_results is False
assert page.next.has_results is False
def test_first_page_all_data(self) -> None:
def data_fn(limit, page_token):
return {
"data": [1, 2, 3],
"page_token": PageToken(end_pagination=True),
}
paginator = self.cls(data_fn=data_fn)
page = paginator.get_result(limit=3, cursor=None)
assert page["data"] == [1, 2, 3]
assert page.prev.has_results is False
assert page.next.has_results is False
def test_first_page_partial_data(self) -> None:
expected_page_token = PageToken(filter_offset=TraceItemFilter(and_filter=AndFilter()))
def data_fn(limit, page_token):
if page_token is None:
return {
"data": [1, 2, 3],
"page_token": expected_page_token,
}
return {
"data": [4, 5],
"page_token": PageToken(end_pagination=True),
}
paginator = self.cls(data_fn=data_fn)
page = paginator.get_result(limit=3, cursor=None)
assert page["data"] == [1, 2, 3]
assert page.prev.has_results is False
assert page.next.has_results is True
actual_page_token = PageToken()
actual_page_token.ParseFromString(base64.b64decode(page.next.value.encode("utf-8")))
assert actual_page_token == expected_page_token
page = paginator.get_result(limit=3, cursor=page.next)
| TestEAPPageTokenPaginator |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_testing.py | {
"start": 21367,
"end": 33120
} | class ____:
def __init__(self):
self.nb_calls = 0
def __call__(self, to_register_func):
self.nb_calls += 1
assert to_register_func.func is _delete_folder
def check_memmap(input_array, mmap_data, mmap_mode="r"):
assert isinstance(mmap_data, np.memmap)
writeable = mmap_mode != "r"
assert mmap_data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, mmap_data)
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
check_memmap(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = "r+"
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
check_memmap(input_array, data, mmap_mode=mmap_mode)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
def test_create_memmap_backed_data(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
data = create_memmap_backed_data(input_array)
check_memmap(input_array, data)
assert registration_counter.nb_calls == 1
data, folder = create_memmap_backed_data(input_array, return_folder=True)
check_memmap(input_array, data)
assert folder == os.path.dirname(data.filename)
assert registration_counter.nb_calls == 2
mmap_mode = "r+"
data = create_memmap_backed_data(input_array, mmap_mode=mmap_mode)
check_memmap(input_array, data, mmap_mode)
assert registration_counter.nb_calls == 3
input_list = [input_array, input_array + 1, input_array + 2]
mmap_data_list = create_memmap_backed_data(input_list)
for input_array, data in zip(input_list, mmap_data_list):
check_memmap(input_array, data)
assert registration_counter.nb_calls == 4
output_data, other = create_memmap_backed_data([input_array, "not-an-array"])
check_memmap(input_array, output_data)
assert other == "not-an-array"
@pytest.mark.parametrize(
"constructor_name, container_type",
[
("list", list),
("tuple", tuple),
("array", np.ndarray),
("sparse", sparse.csr_matrix),
# using `zip` will only keep the available sparse containers
# depending of the installed SciPy version
*zip(["sparse_csr", "sparse_csr_array"], CSR_CONTAINERS),
*zip(["sparse_csc", "sparse_csc_array"], CSC_CONTAINERS),
("dataframe", lambda: pytest.importorskip("pandas").DataFrame),
("series", lambda: pytest.importorskip("pandas").Series),
("index", lambda: pytest.importorskip("pandas").Index),
("pyarrow", lambda: pytest.importorskip("pyarrow").Table),
("pyarrow_array", lambda: pytest.importorskip("pyarrow").Array),
("polars", lambda: pytest.importorskip("polars").DataFrame),
("polars_series", lambda: pytest.importorskip("polars").Series),
("slice", slice),
],
)
@pytest.mark.parametrize(
"dtype, superdtype",
[
(np.int32, np.integer),
(np.int64, np.integer),
(np.float32, np.floating),
(np.float64, np.floating),
],
)
def test_convert_container(
constructor_name,
container_type,
dtype,
superdtype,
):
"""Check that we convert the container to the right type of array with the
right data type."""
if constructor_name in (
"dataframe",
"index",
"polars",
"polars_series",
"pyarrow",
"pyarrow_array",
"series",
):
# delay the import of pandas/polars within the function to only skip this test
# instead of the whole file
container_type = container_type()
container = [0, 1]
container_converted = _convert_container(
container,
constructor_name,
dtype=dtype,
)
assert isinstance(container_converted, container_type)
if constructor_name in ("list", "tuple", "index"):
# list and tuple will use Python class dtype: int, float
# pandas index will always use high precision: np.int64 and np.float64
assert np.issubdtype(type(container_converted[0]), superdtype)
elif constructor_name in ("polars", "polars_series", "pyarrow", "pyarrow_array"):
return
elif hasattr(container_converted, "dtype"):
assert container_converted.dtype == dtype
elif hasattr(container_converted, "dtypes"):
assert container_converted.dtypes[0] == dtype
def test_convert_container_categories_pandas():
pytest.importorskip("pandas")
df = _convert_container(
[["x"]], "dataframe", ["A"], categorical_feature_names=["A"]
)
assert df.dtypes.iloc[0] == "category"
def test_convert_container_categories_polars():
pl = pytest.importorskip("polars")
df = _convert_container([["x"]], "polars", ["A"], categorical_feature_names=["A"])
assert df.schema["A"] == pl.Categorical()
def test_convert_container_categories_pyarrow():
pa = pytest.importorskip("pyarrow")
df = _convert_container([["x"]], "pyarrow", ["A"], categorical_feature_names=["A"])
assert type(df.schema[0].type) is pa.DictionaryType
def test_raises():
# Tests for the raises context manager
# Proper type, no match
with raises(TypeError):
raise TypeError()
# Proper type, proper match
with raises(TypeError, match="how are you") as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# Proper type, proper match with multiple patterns
with raises(TypeError, match=["not this one", "how are you"]) as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# bad type, no match
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError) as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# Bad type, no match, with an err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, err_msg="the failure message") as cm:
raise ValueError()
assert not cm.raised_and_matched
# bad type, with match (is ignored anyway)
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError, match="this is ignored") as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# proper type but bad match
with pytest.raises(
AssertionError, match="should contain one of the following patterns"
):
with raises(TypeError, match="hello") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# proper type but bad match, with err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, match="hello", err_msg="the failure message") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# no raise with default may_pass=False
with pytest.raises(AssertionError, match="Did not raise"):
with raises(TypeError) as cm:
pass
assert not cm.raised_and_matched
# no raise with may_pass=True
with raises(TypeError, match="hello", may_pass=True) as cm:
pass # still OK
assert not cm.raised_and_matched
# Multiple exception types:
with raises((TypeError, ValueError)):
raise TypeError()
with raises((TypeError, ValueError)):
raise ValueError()
with pytest.raises(AssertionError):
with raises((TypeError, ValueError)):
pass
def test_float32_aware_assert_allclose():
# The relative tolerance for float32 inputs is 1e-4
assert_allclose(np.array([1.0 + 2e-5], dtype=np.float32), 1.0)
with pytest.raises(AssertionError):
assert_allclose(np.array([1.0 + 2e-4], dtype=np.float32), 1.0)
# The relative tolerance for other inputs is left to 1e-7 as in
# the original numpy version.
assert_allclose(np.array([1.0 + 2e-8], dtype=np.float64), 1.0)
with pytest.raises(AssertionError):
assert_allclose(np.array([1.0 + 2e-7], dtype=np.float64), 1.0)
# atol is left to 0.0 by default, even for float32
with pytest.raises(AssertionError):
assert_allclose(np.array([1e-5], dtype=np.float32), 0.0)
assert_allclose(np.array([1e-5], dtype=np.float32), 0.0, atol=2e-5)
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_assert_run_python_script_without_output():
code = "x = 1"
assert_run_python_script_without_output(code)
code = "print('something to stdout')"
with pytest.raises(AssertionError, match="Expected no output"):
assert_run_python_script_without_output(code)
code = "print('something to stdout')"
with pytest.raises(
AssertionError,
match="output was not supposed to match.+got.+something to stdout",
):
assert_run_python_script_without_output(code, pattern="to.+stdout")
code = "\n".join(["import sys", "print('something to stderr', file=sys.stderr)"])
with pytest.raises(
AssertionError,
match="output was not supposed to match.+got.+something to stderr",
):
assert_run_python_script_without_output(code, pattern="to.+stderr")
@pytest.mark.parametrize(
"constructor_name",
[
"sparse_csr",
"sparse_csc",
pytest.param(
"sparse_csr_array",
),
pytest.param(
"sparse_csc_array",
),
],
)
def test_convert_container_sparse_to_sparse(constructor_name):
"""Non-regression test to check that we can still convert a sparse container
from a given format to another format.
"""
X_sparse = sparse.random(10, 10, density=0.1, format="csr")
_convert_container(X_sparse, constructor_name)
def check_warnings_as_errors(warning_info, warnings_as_errors):
if warning_info.action == "error" and warnings_as_errors:
with pytest.raises(warning_info.category, match=warning_info.message):
warnings.warn(
message=warning_info.message,
category=warning_info.category,
)
if warning_info.action == "ignore":
with warnings.catch_warnings(record=True) as record:
message = warning_info.message
# Special treatment when regex is used
if "Pyarrow" in message:
message = "\nPyarrow will become a required dependency"
warnings.warn(
message=message,
category=warning_info.category,
)
assert len(record) == 0 if warnings_as_errors else 1
if record:
assert str(record[0].message) == message
assert record[0].category == warning_info.category
@pytest.mark.parametrize("warning_info", _get_warnings_filters_info_list())
def test_sklearn_warnings_as_errors(warning_info):
warnings_as_errors = os.environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0"
check_warnings_as_errors(warning_info, warnings_as_errors=warnings_as_errors)
@pytest.mark.parametrize("warning_info", _get_warnings_filters_info_list())
def test_turn_warnings_into_errors(warning_info):
with warnings.catch_warnings():
turn_warnings_into_errors()
check_warnings_as_errors(warning_info, warnings_as_errors=True)
| RegistrationCounter |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_delaware_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_delaware_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_delaware_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidDelawareZip |
python | pypa__virtualenv | src/virtualenv/seed/embed/base_embed.py | {
"start": 289,
"end": 5229
} | class ____(Seeder, ABC):
def __init__(self, options) -> None:
super().__init__(options, enabled=options.no_seed is False)
self.download = options.download
self.extra_search_dir = [i.resolve() for i in options.extra_search_dir if i.exists()]
self.pip_version = options.pip
self.setuptools_version = options.setuptools
# wheel version needs special handling
# on Python > 3.8, the default is None (as in not used)
# so we can differentiate between explicit and implicit none
self.wheel_version = options.wheel or "none"
self.no_pip = options.no_pip
self.no_setuptools = options.no_setuptools
self.no_wheel = options.no_wheel
self.app_data = options.app_data
self.periodic_update = not options.no_periodic_update
if options.py_version[:2] >= (3, 9):
if options.wheel is not None or options.no_wheel:
LOGGER.warning(
"The --no-wheel and --wheel options are deprecated. "
"They have no effect for Python > 3.8 as wheel is no longer "
"bundled in virtualenv.",
)
self.no_wheel = True
if not self.distribution_to_versions():
self.enabled = False
@classmethod
def distributions(cls) -> dict[str, Version]:
return {
"pip": Version.bundle,
"setuptools": Version.bundle,
"wheel": Version.bundle,
}
def distribution_to_versions(self) -> dict[str, str]:
return {
distribution: getattr(self, f"{distribution}_version")
for distribution in self.distributions()
if getattr(self, f"no_{distribution}", None) is False and getattr(self, f"{distribution}_version") != "none"
}
@classmethod
def add_parser_arguments(cls, parser, interpreter, app_data): # noqa: ARG003
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--no-download",
"--never-download",
dest="download",
action="store_false",
help=f"pass to disable download of the latest {'/'.join(cls.distributions())} from PyPI",
default=True,
)
group.add_argument(
"--download",
dest="download",
action="store_true",
help=f"pass to enable download of the latest {'/'.join(cls.distributions())} from PyPI",
default=False,
)
parser.add_argument(
"--extra-search-dir",
metavar="d",
type=Path,
nargs="+",
help="a path containing wheels to extend the internal wheel list (can be set 1+ times)",
default=[],
)
for distribution, default in cls.distributions().items():
help_ = f"version of {distribution} to install as seed: embed, bundle, none or exact version"
if interpreter.version_info[:2] >= (3, 12) and distribution in {"wheel", "setuptools"}:
default = "none" # noqa: PLW2901
if interpreter.version_info[:2] >= (3, 9) and distribution == "wheel":
default = None # noqa: PLW2901
help_ = SUPPRESS
parser.add_argument(
f"--{distribution}",
dest=distribution,
metavar="version",
help=help_,
default=default,
)
for distribution in cls.distributions():
help_ = f"do not install {distribution}"
if interpreter.version_info[:2] >= (3, 9) and distribution == "wheel":
help_ = SUPPRESS
parser.add_argument(
f"--no-{distribution}",
dest=f"no_{distribution}",
action="store_true",
help=help_,
default=False,
)
parser.add_argument(
"--no-periodic-update",
dest="no_periodic_update",
action="store_true",
help="disable the periodic (once every 14 days) update of the embedded wheels",
default=not PERIODIC_UPDATE_ON_BY_DEFAULT,
)
def __repr__(self) -> str:
result = self.__class__.__name__
result += "("
if self.extra_search_dir:
result += f"extra_search_dir={', '.join(str(i) for i in self.extra_search_dir)},"
result += f"download={self.download},"
for distribution in self.distributions():
if getattr(self, f"no_{distribution}", None):
continue
version = getattr(self, f"{distribution}_version", None)
if version == "none":
continue
ver = f"={version or 'latest'}"
result += f" {distribution}{ver},"
return result[:-1] + ")"
__all__ = [
"BaseEmbed",
]
| BaseEmbed |
python | ApeWorX__ape | src/ape/cli/options.py | {
"start": 5305,
"end": 20629
} | class ____(Option):
"""
The class used in `:meth:~ape.cli.options.network_option`.
"""
# NOTE: Has to be kwargs only to avoid multiple-values for arg error.
def __init__(self, *args, **kwargs) -> None:
ecosystem = kwargs.pop("ecosystem", None)
network = kwargs.pop("network", None)
provider = kwargs.pop("provider", None)
default = kwargs.pop("default", "auto")
callback = kwargs.pop("callback", None)
# NOTE: If using network_option, this part is skipped
# because parsing happens earlier to handle advanced usage.
if not kwargs.get("type"):
base_type = kwargs.pop("base_type", None)
kwargs["type"] = NetworkChoice(
case_sensitive=False,
ecosystem=ecosystem,
network=network,
provider=provider,
base_type=base_type,
callback=callback,
)
elif callback is not None:
# Make sure these are the same.
kwargs["type"].callback = callback
auto = default == "auto"
required = kwargs.get("required", False)
if auto and not required:
if ecosystem:
default = ecosystem[0] if isinstance(ecosystem, (list, tuple)) else ecosystem
else:
# NOTE: Use a function as the default so it is calculated lazily
def fn():
from ape.utils.basemodel import ManagerAccessMixin
return ManagerAccessMixin.network_manager.default_ecosystem.name
default = fn
elif auto:
default = None
help_msg = (
"Override the default network and provider. (see `ape networks list` for options)"
)
kwargs = {
"param_decls": ("--network",),
"help": help_msg,
"default": default,
"required": required,
**kwargs,
}
super().__init__(**kwargs)
def network_option(
default: Optional[Union[str, Callable]] = "auto",
ecosystem: Optional[Union[list[str], str]] = None,
network: Optional[Union[list[str], str]] = None,
provider: Optional[Union[list[str], str]] = None,
required: bool = False,
**kwargs,
) -> Callable:
"""
A ``click.option`` for specifying a network.
Args:
default (Optional[str]): Optionally, change which network to
use as the default. Defaults to how ``ape`` normally
selects a default network unless ``required=True``, then defaults to ``None``.
ecosystem (Optional[Union[list[str], str]]): Filter the options by ecosystem.
Defaults to getting all ecosystems.
network (Optional[Union[list[str], str]]): Filter the options by network.
Defaults to getting all networks in ecosystems.
provider (Optional[Union[list[str], str]]): Filter the options by provider.
Defaults to getting all providers in networks.
required (bool): Whether the option is required. Defaults to ``False``.
When set to ``True``, the default value is ``None``.
kwargs: Additional overrides to ``click.option``.
"""
def decorator(f):
# These are the available network object names you can request.
network_object_names = ("ecosystem", "network", "provider")
requested_network_objects = _get_requested_networks(f, network_object_names)
# When using network_option, handle parsing now so we can pass to
# callback outside of command context.
user_callback = kwargs.pop("callback", None)
def callback(ctx, param, value):
keep_as_choice_str = param.type.base_type is str
try:
provider_obj = _get_provider(value, default, keep_as_choice_str)
except Exception as err:
raise click.BadOptionUsage("--network", str(err), ctx)
if provider_obj:
_update_context_with_network(ctx, provider_obj, requested_network_objects)
elif keep_as_choice_str:
# Add raw choice to object context.
ctx.obj = ctx.obj or {}
ctx.params = ctx.params or {}
ctx.obj["network"] = value
ctx.params["network"] = value
# else: provider is None, meaning not connected intentionally.
return value if user_callback is None else user_callback(ctx, param, value)
wrapped_f = _wrap_network_function(network_object_names, requested_network_objects, f)
# Use NetworkChoice option.
kwargs["type"] = None
# Set this to false to avoid click passing in a str value for network.
# This happens with `kwargs["type"] = None` and we are already handling
# `network` via the partial.
kwargs["expose_value"] = False
# The callback will set any requests values in the command.
kwargs["callback"] = callback
# Create the actual option.
return click.option(
default=default,
ecosystem=ecosystem,
network=network,
provider=provider,
required=required,
cls=NetworkOption,
**kwargs,
)(wrapped_f)
return decorator
def _get_requested_networks(function, network_object_names):
command_signature = inspect.signature(function)
command_kwargs = [x.name for x in command_signature.parameters.values()]
# Any combination of ["ecosystem", "network", "provider"]
return [x for x in command_kwargs if x in network_object_names]
def _update_context_with_network(ctx, provider, requested_network_objects):
    """Populate the click context with the resolved network objects.

    Fills ``ctx.params`` for each object the command callback requested and,
    for connected-provider commands, mirrors all three objects into ``ctx.obj``.
    """
    resolved = {
        "ecosystem": provider.network.ecosystem,
        "network": provider.network,
        "provider": provider,
    }

    # Hand the requested values to the command callback via its parameters.
    for name in requested_network_objects:
        ctx.params[name] = resolved[name]

    if not isinstance(ctx.command, ConnectedProviderCommand):
        return

    # Place all values, regardless of request, in the context.
    # This helps the Ape CLI backend.
    if ctx.obj is None:
        # Happens when using commands that don't use the Ape context
        # or any context.
        ctx.obj = {}

    for name, obj in resolved.items():
        try:
            ctx.obj[name] = obj
        except Exception:
            # Only happens when using an unusual context object.
            raise Abort(
                "Cannot use connected-provider command type(s) "
                "with non key-settable context object."
            )
def _get_provider(value, default, keep_as_choice_str):
    """Resolve a ``--network`` choice into a provider object (or ``None``)."""
    from ape.api.providers import ProviderAPI
    from ape.utils.basemodel import ManagerAccessMixin

    if not keep_as_choice_str and value is None and default == "auto":
        # No explicit choice: fall back to the default provider of the
        # default network on the default ecosystem.
        ecosystem = ManagerAccessMixin.network_manager.default_ecosystem
        return ecosystem.default_network.default_provider

    if value is None or keep_as_choice_str:
        return None

    if isinstance(value, ProviderAPI):
        # Already a resolved provider.
        return value

    if value == _NONE_NETWORK:
        # The user intentionally requested no connection.
        return None

    # A network-choice string; parse it into its provider.
    return ManagerAccessMixin.network_manager.parse_network_choice(value)._provider
def _wrap_network_function(network_object_names, requested_network_objects, function):
# Prevent argument errors but initializing callback to use None placeholders.
partial_kwargs: dict = {}
for arg_type in network_object_names:
if arg_type in requested_network_objects:
partial_kwargs[arg_type] = None
if partial_kwargs:
wrapped_f = partial(function, **partial_kwargs)
# NOTE: The following is needed for click internals.
wrapped_f.__name__ = function.__name__ # type: ignore[attr-defined]
# NOTE: The following is needed for sphinx internals.
wrapped_f.__doc__ = function.__doc__
# Add other click parameters.
if hasattr(function, "__click_params__"):
wrapped_f.__click_params__ = function.__click_params__ # type: ignore[attr-defined]
return wrapped_f
else:
# No network kwargs are used. No need for partial wrapper.
return function
def skip_confirmation_option(help="") -> Callable:
    """
    A ``click.option`` for skipping confirmation (``--yes``).

    Args:
        help (str): CLI option help text. Defaults to ``""``.
    """
    flag_kwargs = {"default": False, "is_flag": True, "help": help}
    return click.option("-y", "--yes", "skip_confirmation", **flag_kwargs)
def account_option(
    *param_decls,
    account_type: _ACCOUNT_TYPE_FILTER = None,
    prompt: Optional[Union[str, bool]] = AccountAliasPromptChoice.DEFAULT_PROMPT,
) -> Callable:
    """
    A CLI option that accepts either the account alias or the account number.
    If not given anything, it will prompt the user to select an account.
    """
    param_decls = param_decls or ("--account",)

    def _select_account_callback(ctx, param, value):
        # When prompting is enabled and no value was given,
        # interactively select an account via the choice type.
        if prompt and param and not value:
            return param.type.select_account()

        return value

    prompt_message = prompt if isinstance(prompt, str) else None
    return click.option(
        *param_decls,
        type=AccountAliasPromptChoice(key=account_type, prompt_message=prompt_message),
        callback=_select_account_callback,
    )
def _load_contracts(ctx, param, value) -> Optional[Union["ContractType", list["ContractType"]]]:
    """
    ``click`` callback for ``--contract``: resolve contract name(s) from the
    local project into ``ContractType`` object(s).

    Returns ``None`` when no value was given, a list when the option was
    declared with ``multiple=True``, and a single contract type otherwise.

    Raises:
        :class:`~ape.exceptions.ProjectError`: When the project has no
          contracts or a named contract does not exist.
    """
    if not value:
        return None

    from ape.utils.basemodel import ManagerAccessMixin

    contracts = ManagerAccessMixin.local_project.contracts
    if len(contracts) == 0:
        raise ProjectError("Project has no contracts.")

    # If the user passed in `multiple=True`, then `value` is a list,
    # and therefore we should also return a list.
    is_multiple = isinstance(value, (tuple, list))

    def get_contract(contract_name: str) -> "ContractType":
        if contract_name not in contracts:
            # Report the specific missing name; previously this interpolated
            # the whole option value, which is a tuple when `multiple=True`.
            raise ProjectError(f"No contract named '{contract_name}'")

        return contracts[contract_name]

    return [get_contract(c) for c in value] if is_multiple else get_contract(value)
def contract_option(help=None, required=False, multiple=False) -> Callable:
    """
    Contract(s) from the current project.

    If you pass ``multiple=True``, you will get a list of contract types from
    the callback.

    Raises:
        :class:`~ape.exceptions.ProjectError`: In the callback when it fails
          to load the contracts.
    """
    help = help or "The name of a contract in the current project"
    return click.option(
        "--contract", help=help, required=required, callback=_load_contracts, multiple=multiple
    )
def output_format_option(default: OutputFormat = OutputFormat.TREE) -> Callable:
    """
    A ``click.option`` for specifying a format to use when outputting data.

    Args:
        default (:class:`~ape.cli.choices.OutputFormat`): Defaults to ``TREE`` format.
    """

    def _to_output_format(ctx, param, value):
        # Normalize the raw (case-insensitive) choice string into the enum.
        return OutputFormat(value.upper())

    return click.option(
        "--format",
        "output_format",
        type=output_format_choice(),
        default=default.value,
        callback=_to_output_format,
    )
def incompatible_with(incompatible_opts) -> type[click.Option]:
    """
    Factory for creating custom ``click.Option`` subclasses that
    enforce incompatibility with the option strings passed to this function.

    Usage example::

        import click

        @click.command()
        @click.option("--option", cls=incompatible_with(["other_option"]))
        def cmd(option, other_option):
            ....

    Args:
        incompatible_opts (str | list[str]): Option name(s) that may not be
          combined with the decorated option.

    Returns:
        type[click.Option]: An option class enforcing the incompatibility.
    """
    if isinstance(incompatible_opts, str):
        incompatible_opts = [incompatible_opts]

    class IncompatibleOption(click.Option):
        # NOTE: No `__init__` override needed; the previous delegate-only
        # override added nothing over `click.Option.__init__`.

        def handle_parse_result(self, ctx, opts, args):
            # if None it means we're in autocomplete mode and don't want to validate
            if ctx.obj is not None:
                found_incompatible = ", ".join(
                    f"--{opt.replace('_', '-')}" for opt in opts if opt in incompatible_opts
                )
                if self.name is not None and self.name in opts and found_incompatible:
                    name = self.name.replace("_", "-")
                    raise click.BadOptionUsage(
                        option_name=self.name,
                        message=f"'--{name}' can't be used with '{found_incompatible}'.",
                    )

            return super().handle_parse_result(ctx, opts, args)

    return IncompatibleOption
def _project_path_callback(ctx, param, val):
return Path(val) if val else Path.cwd()
def _project_callback(ctx, param, val):
    """``click`` callback: resolve ``--project`` into a project manager."""
    if any(flag in sys.argv for flag in ("--help", "-h")):
        # Perf: project option is eager; have to check sys.argv to
        # know to exit early when only doing --help.
        return

    from ape.utils.basemodel import ManagerAccessMixin

    local_project = ManagerAccessMixin.local_project
    if not val:
        return local_project

    path = Path(val)
    if path == local_project.path:
        return local_project

    Project = ManagerAccessMixin.Project
    if path.is_file() and path.suffix == ".json":
        # A manifest file.
        return Project.from_manifest(path)

    elif path.is_dir():
        return Project(path)

    raise click.BadOptionUsage("--project", "Not a valid project")
def project_option(**kwargs):
    """A ``click.option`` for specifying a local project path or manifest."""
    _type = kwargs.pop("type", None)
    # A `Path` type requests the raw path; otherwise resolve a project manager.
    wants_raw_path = isinstance(_type, type) and issubclass(_type, Path)
    callback = _project_path_callback if wants_raw_path else _project_callback
    return click.option(
        "--project",
        help="The path to a local project or manifest",
        callback=callback,
        metavar="PATH",
        is_eager=True,
        **kwargs,
    )
def _json_option(name, help, **kwargs):
    """Create a ``click.option`` whose value is parsed as a JSON mapping."""
    return click.option(name, help=help, type=JSON(), metavar='{"KEY": "VAL"}', **kwargs)
def config_override_option(**kwargs):
    """A ``--config-override`` option accepting JSON config mappings."""
    return _json_option("--config-override", help="Config override mappings", **kwargs)
def _excluded_compilers_callback(ctx, param, value):
if not value:
return
return [c.lower() for c in value]
def excluded_compilers_option(**kwargs):
    """A repeatable choice-option for excluding registered compilers."""
    from ape.utils.basemodel import ManagerAccessMixin

    registered = [
        compiler.name
        for compiler in ManagerAccessMixin.compiler_manager.registered_compilers.values()
    ]
    return click.option(
        "--exclude-compiler",
        "excluded_compilers",
        help="Exclude specific compilers from the compilation process",
        type=click.Choice(registered, case_sensitive=False),
        callback=_excluded_compilers_callback,
        multiple=True,
        **kwargs,
    )
| NetworkOption |
python | tensorflow__tensorflow | third_party/xla/xla/hlo/tools/generate_hlo_test_checks.py | {
"start": 9901,
"end": 12221
} | class ____(Generic[_T]):
"""Sorts each element of an input stream into arbitrarily many output streams.
"""
def __init__(
self,
input_stream: Iterator[_T],
select_buffer: Callable[
[_T],
Union[collections.deque[_T], tuple[collections.deque[_T], ...], None],
],
):
self._input_stream: Iterator[_T] = input_stream
self._select_buffer: Callable[
[_T],
Union[collections.deque[_T], tuple[collections.deque[_T], ...], None],
] = select_buffer
def next_in_buffer(self, target_buffer: collections.deque[_T]) -> _T:
"""Returns the next item in the sub-stream corresponding to `target_buffer`.
Args:
target_buffer: The queue backing the sub-stream whose next element should
be returned.
Returns:
If `target_buffer` is nonempty, the next element of `target_buffer`.
Otherwise, the next element of `self._input_stream` that would have been
added to `target_buffer`.
Raises:
StopIteration: If `target_buffer` and `self._input_stream` are both empty.
"""
if bool(target_buffer):
return target_buffer.popleft()
for item in self._input_stream:
which_buffer = self._select_buffer(item)
if which_buffer is None:
continue
if which_buffer is target_buffer:
return item
if isinstance(which_buffer, collections.deque):
which_buffer.append(item)
continue
if isinstance(which_buffer, tuple):
return_item: bool = False
for buffer in which_buffer:
if buffer is target_buffer:
return_item = True
else:
buffer.append(item)
if return_item:
return item
continue
T = TypeVar("T", bound=_T)
ExpectedTypes = Union[
collections.deque[T], tuple[collections.deque[T], ...], None
]
raise TypeError(
f"`{self._select_buffer}` returned a value of type "
f"`{type(which_buffer).__name__}`; expected one of `{ExpectedTypes}`."
)
raise StopIteration()
def iterate_over_buffer(
self, target_buffer: collections.deque[_T]
) -> Iterator[_T]:
while True:
try:
yield self.next_in_buffer(target_buffer)
except StopIteration:
return
| IterateByCategory |
python | coleifer__peewee | pwiz.py | {
"start": 389,
"end": 483
} | class ____(Model):
class Meta:
database = database
"""
UNKNOWN_FIELD = """\
| BaseModel |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 8461,
"end": 9270
} | class ____(Field):
field_type = 'JSON'
_json_datatype = 'json'
def __init__(self, dumps=None, *args, **kwargs):
self.dumps = dumps or json.dumps
super(JSONField, self).__init__(*args, **kwargs)
def db_value(self, value):
if value is None:
return value
if not isinstance(value, Json):
return Cast(self.dumps(value), self._json_datatype)
return value
def __getitem__(self, value):
return JsonLookup(self, [value])
def path(self, *keys):
return JsonPath(self, keys)
def concat(self, value):
if not isinstance(value, Node):
value = Json(value)
return super(JSONField, self).concat(value)
def cast_jsonb(node):
return NodeList((node, SQL('::jsonb')), glue='')
| JSONField |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 20023,
"end": 21660
} | class ____(Scroll):
''' *toolbar icon*: |wheel_pan_icon|
The wheel pan tool allows the user to pan the plot along the configured
dimension using the scroll wheel.
.. |wheel_pan_icon| image:: /_images/icons/wheel-pan.svg
:height: 24px
:alt: Icon of a mouse shape next to crossed arrows representing the wheel-pan tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
dimension = Enum(Dimension, default="width", help="""
Which dimension the wheel pan tool is constrained to act in. By default the
wheel pan tool will pan the plot along the x-axis.
""")
modifiers = Modifiers(default={}, help="""
Allows to configure a combination of modifier keys, which need to
be pressed during the selected gesture for this tool to trigger.
For example, to pan only when ``Ctrl`` and ``Shift`` keys are
pressed, use:
.. code-block:: python
tool = WheelPanTool(modifiers=dict(ctrl=True, shift=True))
plot.add_tools(tool)
or alternatively using a concise syntax:
.. code-block:: python
tool = WheelPanTool(modifiers="ctrl+shift")
plot.add_tools(tool)
.. note::
Setting modifiers allows this tool to be automatically activated,
if ``Toolbar.active_scroll`` is set to ``"auto"``.
.. warning::
Configuring modifiers is a platform dependent feature and
can make this tool unusable for example on mobile devices.
""").accepts(String, _parse_modifiers)
| WheelPanTool |
python | django__django | tests/model_enums/tests.py | {
"start": 8640,
"end": 8749
} | class ____(datetime.time, models.Choices):
BREAKFAST = 7, 0
LUNCH = 13, 0
DINNER = 18, 30
| MealTimes |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_sheet_views2.py | {
"start": 301,
"end": 3457
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_views1(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A2" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views2(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(0, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="B1" activePane="topRight" state="frozen"/><selection pane="topRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views3(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" ySplit="1" topLeftCell="B2" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="B1" sqref="B1"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views4(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes("G4")
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views5(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(3, 6, 3, 6, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozenSplit"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteSheetViews |
python | pandas-dev__pandas | pandas/core/dtypes/dtypes.py | {
"start": 32383,
"end": 39964
} | class ____(PeriodDtypeBase, PandasExtensionDtype):
"""
An ExtensionDtype for Period data.
**This is not an actual numpy dtype**, but a duck type.
Parameters
----------
freq : str or DateOffset
The frequency of this PeriodDtype.
Attributes
----------
freq
Methods
-------
None
See Also
--------
Period : Represents a single time period.
PeriodIndex : Immutable index for period data.
date_range : Return a fixed frequency DatetimeIndex.
Series : One-dimensional array with axis labels.
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Examples
--------
>>> pd.PeriodDtype(freq="D")
period[D]
>>> pd.PeriodDtype(freq=pd.offsets.MonthEnd())
period[M]
"""
type: type[Period] = Period
kind: str_type = "O"
str = "|O08"
base = np.dtype("O")
num = 102
_metadata = ("freq",)
_match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
# error: Incompatible types in assignment (expression has type
# "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype"
# defined the type as "Dict[str, PandasExtensionDtype]") [assignment]
_cache_dtypes: dict[BaseOffset, int] = {} # type: ignore[assignment]
__hash__ = PeriodDtypeBase.__hash__
_freq: BaseOffset
_supports_2d = True
_can_fast_transpose = True
def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034
"""
Parameters
----------
freq : PeriodDtype, BaseOffset, or string
"""
if isinstance(freq, PeriodDtype):
return freq
if not isinstance(freq, BaseOffset):
freq = cls._parse_dtype_strict(freq)
if isinstance(freq, BDay):
# GH#53446
# TODO(3.0): enforcing this will close GH#10575
warnings.warn(
"PeriodDtype[B] is deprecated and will be removed in a future "
"version. Use a DatetimeIndex with freq='B' instead",
FutureWarning, # pdlint: ignore[warning_class]
stacklevel=find_stack_level(),
)
try:
dtype_code = cls._cache_dtypes[freq]
except KeyError:
dtype_code = freq._period_dtype_code
cls._cache_dtypes[freq] = dtype_code
u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n)
u._freq = freq
return u
def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]:
return type(self), (self.name,)
@property
def freq(self) -> BaseOffset:
"""
The frequency object of this PeriodDtype.
The `freq` property returns the `BaseOffset` object that represents the
frequency of the PeriodDtype. This frequency specifies the interval (e.g.,
daily, monthly, yearly) associated with the Period type. It is essential
for operations that depend on time-based calculations within a period index
or series.
See Also
--------
Period : Represents a period of time.
PeriodIndex : Immutable ndarray holding ordinal values indicating
regular periods.
PeriodDtype : An ExtensionDtype for Period data.
date_range : Return a fixed frequency range of dates.
Examples
--------
>>> dtype = pd.PeriodDtype(freq="D")
>>> dtype.freq
<Day>
"""
return self._freq
@classmethod
def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset:
if isinstance(freq, str): # note: freq is already of type str!
if freq.startswith(("Period[", "period[")):
m = cls._match.search(freq)
if m is not None:
freq = m.group("freq")
freq_offset = to_offset(freq, is_period=True)
if freq_offset is not None:
return freq_offset
raise TypeError(
"PeriodDtype argument should be string or BaseOffset, "
f"got {type(freq).__name__}"
)
@classmethod
def construct_from_string(cls, string: str_type) -> PeriodDtype:
"""
Strict construction from a string, raise a TypeError if not
possible
"""
if (
isinstance(string, str) and (string.startswith(("period[", "Period[")))
) or isinstance(string, BaseOffset):
# do not parse string like U as period[U]
# avoid tuple to be regarded as freq
try:
return cls(freq=string)
except ValueError:
pass
if isinstance(string, str):
msg = f"Cannot construct a 'PeriodDtype' from '{string}'"
else:
msg = f"'construct_from_string' expects a string, got {type(string)}"
raise TypeError(msg)
def __str__(self) -> str_type:
return self.name
@property
def name(self) -> str_type:
return f"period[{self._freqstr}]"
@property
def na_value(self) -> NaTType:
return NaT
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return other[:1].lower() + other[1:] == self.name
return super().__eq__(other)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
@classmethod
def is_dtype(cls, dtype: object) -> bool:
"""
Return a boolean if the passed type is an actual dtype that we
can match (via string or type)
"""
if isinstance(dtype, str):
# PeriodDtype can be instantiated from freq string like "U",
# but doesn't regard freq str like "U" as dtype.
if dtype.startswith(("period[", "Period[")):
try:
return cls._parse_dtype_strict(dtype) is not None
except ValueError:
return False
else:
return False
return super().is_dtype(dtype)
def construct_array_type(self) -> type_t[PeriodArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays import PeriodArray
return PeriodArray
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:
"""
Construct PeriodArray from pyarrow Array/ChunkedArray.
"""
import pyarrow
from pandas.core.arrays import PeriodArray
from pandas.core.arrays.arrow._arrow_utils import (
pyarrow_array_to_numpy_and_mask,
)
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
chunks = array.chunks
results = []
for arr in chunks:
data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))
parr = PeriodArray(data.copy(), dtype=self, copy=False)
# error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray";
# expected type "Union[int, Sequence[int], Sequence[bool], slice]"
parr[~mask] = NaT # type: ignore[index]
results.append(parr)
if not results:
return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False)
return PeriodArray._concat_same_type(results)
@cache_readonly
def index_class(self) -> type_t[PeriodIndex]:
from pandas import PeriodIndex
return PeriodIndex
@register_extension_dtype
@set_module("pandas")
| PeriodDtype |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_checked_types.py | {
"start": 14803,
"end": 18411
} | class ____(PMap, CheckedType, metaclass=_CheckedMapTypeMeta):
"""
A CheckedPMap is a PMap which allows specifying type and invariant checks.
>>> class IntToFloatMap(CheckedPMap):
... __key_type__ = int
... __value_type__ = float
... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
...
>>> IntToFloatMap({1: 1.5, 2: 2.25})
IntToFloatMap({1: 1.5, 2: 2.25})
"""
__slots__ = ()
def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
return super(CheckedPMap, cls).__new__(cls, size, initial)
evolver = CheckedPMap.Evolver(cls, pmap())
for k, v in initial.items():
evolver.set(k, v)
return evolver.persistent()
def evolver(self):
return CheckedPMap.Evolver(self.__class__, self)
def __repr__(self):
return self.__class__.__name__ + "({0})".format(str(dict(self)))
__str__ = __repr__
def serialize(self, format=None):
serializer = self.__serializer__
return dict(serializer(format, k, v) for k, v in self.items())
@classmethod
def create(cls, source_data, _factory_fields=None):
if isinstance(source_data, cls):
return source_data
# Recursively apply create methods of checked types if the types of the supplied data
# does not match any of the valid types.
key_types = get_types(cls._checked_key_types)
checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
value_types = get_types(cls._checked_value_types)
checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)
if checked_key_type or checked_value_type:
return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
for key, value in source_data.items()))
return cls(source_data)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, dict(self),)
class Evolver(PMap._Evolver):
__slots__ = ('_destination_class', '_invariant_errors')
def __init__(self, destination_class, original_map):
super(CheckedPMap.Evolver, self).__init__(original_map)
self._destination_class = destination_class
self._invariant_errors = []
def set(self, key, value):
_check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
_check_types([value], self._destination_class._checked_value_types, self._destination_class)
self._invariant_errors.extend(data for valid, data in (invariant(key, value)
for invariant in self._destination_class._checked_invariants)
if not valid)
return super(CheckedPMap.Evolver, self).set(key, value)
def persistent(self):
if self._invariant_errors:
raise InvariantException(error_codes=self._invariant_errors)
if self.is_dirty() or type(self._original_pmap) != self._destination_class:
return self._destination_class(self._buckets_evolver.persistent(), self._size)
return self._original_pmap
| CheckedPMap |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_instance.py | {
"start": 2174,
"end": 4750
} | class ____:
def test_valid(self) -> None:
pd = pytest.importorskip("pandas")
Series, DataFrame, GroupBy = pd.Series, pd.DataFrame, pd.core.groupby.GroupBy
prop0 = bcpi.Object(Series)
assert prop0.is_valid(Series([1, 2, 3]))
prop1 = bcpi.Object("pandas.Series")
assert prop1.is_valid(Series([1, 2, 3]))
prop2 = bcpi.Object(DataFrame)
assert prop2.is_valid(DataFrame())
prop3 = bcpi.Object("pandas.DataFrame")
assert prop3.is_valid(DataFrame())
prop4 = bcpi.Object(GroupBy)
assert prop4.is_valid(GroupBy(DataFrame()))
prop5 = bcpi.Object("pandas.core.groupby.GroupBy")
assert prop5.is_valid(GroupBy(DataFrame()))
def test_invalid(self) -> None:
pd = pytest.importorskip("pandas")
Series, DataFrame, GroupBy = pd.Series, pd.DataFrame, pd.core.groupby.GroupBy
prop0 = bcpi.Object(Series)
assert not prop0.is_valid(DataFrame())
assert not prop0.is_valid(GroupBy(DataFrame()))
assert not prop0.is_valid({})
assert not prop0.is_valid(object())
assert not prop0.is_valid(_TestModel())
prop1 = bcpi.Object("pandas.Series")
assert not prop1.is_valid(DataFrame())
assert not prop1.is_valid(GroupBy(DataFrame()))
assert not prop1.is_valid({})
assert not prop1.is_valid(object())
assert not prop1.is_valid(_TestModel())
prop2 = bcpi.Object(DataFrame)
assert not prop2.is_valid(Series([1, 2, 3]))
assert not prop2.is_valid(GroupBy(DataFrame()))
assert not prop2.is_valid({})
assert not prop2.is_valid(object())
assert not prop2.is_valid(_TestModel())
prop3 = bcpi.Object("pandas.DataFrame")
assert not prop3.is_valid(Series([1, 2, 3]))
assert not prop3.is_valid(GroupBy(DataFrame()))
assert not prop3.is_valid({})
assert not prop3.is_valid(object())
assert not prop3.is_valid(_TestModel())
prop4 = bcpi.Object(GroupBy)
assert not prop4.is_valid(Series([1, 2, 3]))
assert not prop4.is_valid(DataFrame())
assert not prop4.is_valid({})
assert not prop4.is_valid(object())
assert not prop4.is_valid(_TestModel())
prop5 = bcpi.Object("pandas.core.groupby.GroupBy")
assert not prop5.is_valid(Series([1, 2, 3]))
assert not prop5.is_valid(DataFrame())
assert not prop5.is_valid({})
assert not prop5.is_valid(object())
assert not prop5.is_valid(_TestModel())
| Test_Object |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 143,
"end": 3889
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(losses.MeanSquaredError(name="mymse"))
def test_base_function_reduction(self):
mse_fn = losses.mean_squared_error
y_true = np.array([4, 8, 12])
y_pred = np.array([[3], [0], [1]])
loss = mse_fn(y_true, y_pred)
self.assertEqual(backend.shape(loss), (3,))
def test_all_correct_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[4, 8, 12], [8, 1, 3]])
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(loss, 0.0)
def test_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 49.5)
def test_scalar_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 113.85)
def test_sample_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 767.8 / 6)
def test_timestep_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
loss = mse_obj(
y_true,
y_pred,
sample_weight=sample_weight,
)
self.assertAlmostEqual(loss, 97.833336)
def test_zero_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(loss, 0.0)
def test_no_reduction(self):
mse_obj = losses.MeanSquaredError(reduction=None)
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, [84.3333, 143.3666])
def test_sum_reduction(self):
mse_obj = losses.MeanSquaredError(reduction="sum")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 227.69998)
def test_mean_with_sample_weight_reduction(self):
mse_obj = losses.MeanSquaredError(reduction="mean_with_sample_weight")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
sample_weight = np.array([[1.2], [3.4]])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(
loss, (110 / 3 * 1.2 + 187 / 3 * 3.4) / (1.2 + 3.4)
)
def test_dtype_arg(self):
mse_obj = losses.MeanSquaredError(dtype="bfloat16")
y_true = np.array([[1, 9, 2], [-5, -2, 6]])
y_pred = np.array([[4, 8, 12], [8, 1, 3]], dtype="float32")
loss = mse_obj(y_true, y_pred)
self.assertDType(loss, "bfloat16")
| MeanSquaredErrorTest |
python | huggingface__transformers | src/transformers/models/seed_oss/modeling_seed_oss.py | {
"start": 2185,
"end": 2912
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
SeedOssRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| SeedOssRMSNorm |
python | facelessuser__pymdown-extensions | pymdownx/critic.py | {
"start": 3765,
"end": 5275
} | class ____(Postprocessor):
"""Handle cleanup on post process for viewing critic marks."""
def __init__(self, critic_stash):
"""Initialize."""
super().__init__()
self.critic_stash = critic_stash
def subrestore(self, m):
"""Replace all critic tags in the paragraph block `<p>(critic del close)(critic ins close)</p>` etc."""
content = None
key = m.group('key')
if key is not None:
content = self.critic_stash.get(key)
return content
def block_edit(self, m):
"""Handle block edits."""
if 'break' in m.group(4).split(' '):
return m.group(0)
else:
return m.group(1) + m.group(2) + m.group(4) + ' block' + m.group(5)
def restore(self, m):
"""Replace placeholders with actual critic tags."""
content = None
if m.group('block_keys') is not None:
content = RE_CRITIC_SUB_PLACEHOLDER.sub(
self.subrestore, m.group('block_keys')
)
if content is not None:
content = RE_CRITIC_BLOCK.sub(self.block_edit, content)
else:
text = self.critic_stash.get(m.group('key'))
if text is not None:
content = text
return content if content is not None else m.group(0)
def run(self, text):
"""Replace critic placeholders."""
text = RE_CRITIC_PLACEHOLDER.sub(self.restore, text)
return text
| CriticsPostprocessor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super4.py | {
"start": 280,
"end": 391
} | class ____(Generic[_T1]):
@classmethod
def construct(cls: type[_T1]) -> _T1:
return cls()
| Parent1 |
python | kamyu104__LeetCode-Solutions | Python/total-waviness-of-numbers-in-range-ii.py | {
"start": 3151,
"end": 5082
} | class ____(object):
def totalWaviness(self, num1, num2):
"""
:type num1: int
:type num2: int
:rtype: int
"""
def count(x):
s = str(x)
dp = {}
for prev in xrange(-1, 10):
for prev2 in xrange(-1, 10):
for zero in xrange(2):
for tight in xrange(2):
dp[(prev, prev2, zero, tight)] = (1, 0)
for i in reversed(xrange(len(s))):
new_dp = {}
for prev in xrange(-1, 10):
for prev2 in xrange(-1, 10):
for zero in xrange(2):
for tight in xrange(2):
cnt = w = 0
mx = int(s[i]) if tight else 9
for d in xrange(mx+1):
new_tight = tight and (d == int(s[i]))
new_zero = zero and (d == 0)
new_prev2 = prev
new_prev = d if not new_zero else -1
key = (new_prev, new_prev2, new_zero, new_tight)
if key in dp:
new_cnt, nw = dp[key]
cnt += new_cnt
if not zero and prev2 != -1 and ((prev2 < prev and prev > d) or (prev2 > prev and prev < d)):
w += new_cnt
w += nw
new_dp[(prev, prev2, zero, tight)] = (cnt, w)
dp = new_dp
return dp[(-1, -1, True, True)][1]
return count(num2)-count(num1-1)
# Time: O(logn * 11 * 11 * 2 * 2 * 10)
# Space: O(11 * 11 * 2 * 2)
# dp by list
| Solution3 |
python | facebookresearch__faiss | tests/test_index_accuracy.py | {
"start": 12742,
"end": 14765
} | class ____(unittest.TestCase):
def test_L1(self):
search_Ls = [10, 20, 30]
thresholds = [0.83, 0.92, 0.95]
for search_L, threshold in zip(search_Ls, thresholds):
self.subtest(32, faiss.METRIC_L1, 10, search_L, threshold)
def test_L2(self):
search_Ls = [10, 20, 30]
thresholds = [0.83, 0.92, 0.95]
for search_L, threshold in zip(search_Ls, thresholds):
self.subtest(32, faiss.METRIC_L2, 10, search_L, threshold)
def test_IP(self):
search_Ls = [10, 20, 30]
thresholds = [0.80, 0.90, 0.93]
for search_L, threshold in zip(search_Ls, thresholds):
self.subtest(32, faiss.METRIC_INNER_PRODUCT, 10, search_L, threshold)
def subtest(self, d, metric, topk, search_L, threshold):
metric_names = {
faiss.METRIC_L1: "L1",
faiss.METRIC_L2: "L2",
faiss.METRIC_INNER_PRODUCT: "IP",
}
topk = 10
nt, nb, nq = 2000, 1000, 200
xt, xb, xq = get_dataset_2(d, nt, nb, nq)
gt_index = faiss.IndexFlat(d, metric)
gt_index.add(xb)
gt_D, gt_I = gt_index.search(xq, topk)
K = 16
index = faiss.IndexNNDescentFlat(d, K, metric)
index.nndescent.S = 10
index.nndescent.R = 32
index.nndescent.L = K + 20
index.nndescent.iter = 5
index.verbose = False
index.nndescent.search_L = search_L
index.add(xb)
D, I = index.search(xq, topk)
recalls = 0
for i in range(nq):
for j in range(topk):
for k in range(topk):
if I[i, j] == gt_I[i, k]:
recalls += 1
break
recall = 1.0 * recalls / (nq * topk)
print(
"Metric: {}, L: {}, Recall@{}: {}".format(
metric_names[metric], search_L, topk, recall
)
)
assert recall > threshold, "{} <= {}".format(recall, threshold)
| TestNNDescent |
python | pytorch__pytorch | torch/nn/modules/rnn.py | {
"start": 31422,
"end": 48956
} | class ____(RNNBase):
r"""__init__(input_size,hidden_size,num_layers=1,bias=True,batch_first=False,dropout=0.0,bidirectional=False,proj_size=0,device=None,dtype=None)
Apply a multi-layer long short-term memory (LSTM) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll} \\
i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
h_t = o_t \odot \tanh(c_t) \\
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
is the hidden state of the layer at time `t-1` or the initial hidden
state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
:math:`o_t` are the input, forget, cell, and output gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.
In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l \ge 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes
the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from
``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly).
Second, the output hidden state of each layer will be multiplied by a learnable projection
matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output
of LSTM network will be of different shape as well. See Inputs/Outputs sections below for exact
dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two LSTMs together to form a `stacked LSTM`,
with the second LSTM taking in outputs of the first LSTM and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
LSTM layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0
Inputs: input, (h_0, c_0)
* **input**: tensor of shape :math:`(L, H_{in})` for unbatched input,
:math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence. The input can also be a packed variable length sequence.
See :func:`torch.nn.utils.rnn.pack_padded_sequence` or
:func:`torch.nn.utils.rnn.pack_sequence` for details.
* **h_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the
initial hidden state for each element in the input sequence.
Defaults to zeros if (h_0, c_0) is not provided.
* **c_0**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{cell})` containing the
initial cell state for each element in the input sequence.
Defaults to zeros if (h_0, c_0) is not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{cell} ={} & \text{hidden\_size} \\
H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\
\end{aligned}
Outputs: output, (h_n, c_n)
* **output**: tensor of shape :math:`(L, D * H_{out})` for unbatched input,
:math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the LSTM, for each `t`. If a
:class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output
will also be a packed sequence. When ``bidirectional=True``, `output` will contain
a concatenation of the forward and reverse hidden states at each time step in the sequence.
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{out})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{out})` containing the
final hidden state for each element in the sequence. When ``bidirectional=True``,
`h_n` will contain a concatenation of the final forward and reverse hidden states, respectively.
* **c_n**: tensor of shape :math:`(D * \text{num\_layers}, H_{cell})` for unbatched input or
:math:`(D * \text{num\_layers}, N, H_{cell})` containing the
final cell state for each element in the sequence. When ``bidirectional=True``,
`c_n` will contain a concatenation of the final forward and reverse cell states, respectively.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
`(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`. If
``proj_size > 0`` was specified, the shape will be
`(4*hidden_size, num_directions * proj_size)` for `k > 0`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
`(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0``
was specified, the shape will be `(4*hidden_size, proj_size)`.
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
`(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
`(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer
of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was
specified.
weight_ih_l[k]_reverse: Analogous to `weight_ih_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
weight_hh_l[k]_reverse: Analogous to `weight_hh_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
bias_ih_l[k]_reverse: Analogous to `bias_ih_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
bias_hh_l[k]_reverse: Analogous to `bias_hh_l[k]` for the reverse direction.
Only present when ``bidirectional=True``.
weight_hr_l[k]_reverse: Analogous to `weight_hr_l[k]` for the reverse direction.
Only present when ``bidirectional=True`` and ``proj_size > 0`` was specified.
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
.. note::
For bidirectional LSTMs, `h_n` is not equivalent to the last element of `output`; the
former contains the final forward and reverse hidden states, while the latter contains the
final forward hidden state and the initial reverse hidden state.
.. note::
``batch_first`` argument is ignored for unbatched inputs.
.. note::
``proj_size`` should be smaller than ``hidden_size``.
.. include:: ../cudnn_rnn_determinism.rst
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
"""
@overload
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
batch_first: bool = False,
dropout: float = 0.0,
bidirectional: bool = False,
proj_size: int = 0,
device=None,
dtype=None,
) -> None: ...
@overload
def __init__(self, *args, **kwargs) -> None: ...
def __init__(self, *args, **kwargs):
super().__init__("LSTM", *args, **kwargs)
def get_expected_cell_size(
self, input: Tensor, batch_sizes: Optional[Tensor]
) -> tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (
self.num_layers * num_directions,
mini_batch,
self.hidden_size,
)
return expected_hidden_size
# In the future, we should prevent mypy from applying contravariance rules here.
# See torch/nn/modules/module.py::_forward_unimplemented
def check_forward_args(
self,
input: Tensor,
hidden: tuple[Tensor, Tensor], # type: ignore[override]
batch_sizes: Optional[Tensor],
) -> None:
self.check_input(input, batch_sizes)
self.check_hidden_size(
hidden[0],
self.get_expected_hidden_size(input, batch_sizes),
"Expected hidden[0] size {}, got {}",
)
self.check_hidden_size(
hidden[1],
self.get_expected_cell_size(input, batch_sizes),
"Expected hidden[1] size {}, got {}",
)
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
def permute_hidden( # type: ignore[override]
self,
hx: tuple[Tensor, Tensor],
permutation: Optional[Tensor],
) -> tuple[Tensor, Tensor]:
if permutation is None:
return hx
return _apply_permutation(hx[0], permutation), _apply_permutation(
hx[1], permutation
)
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
@overload # type: ignore[override]
@torch._jit_internal._overload_method # noqa: F811
def forward(
self,
input: Tensor,
hx: Optional[tuple[Tensor, Tensor]] = None,
) -> tuple[Tensor, tuple[Tensor, Tensor]]: # noqa: F811
pass
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
@overload
@torch._jit_internal._overload_method # noqa: F811
def forward(
self,
input: PackedSequence,
hx: Optional[tuple[Tensor, Tensor]] = None,
) -> tuple[PackedSequence, tuple[Tensor, Tensor]]: # noqa: F811
pass
def forward(self, input, hx=None): # noqa: F811
self._update_flat_weights()
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
batch_sizes = None
num_directions = 2 if self.bidirectional else 1
real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
if hx is None:
h_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
real_hidden_size,
dtype=input.dtype,
device=input.device,
)
c_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
hx = (h_zeros, c_zeros)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
else:
if input.dim() not in (2, 3):
raise ValueError(
f"LSTM: Expected input to be 2D or 3D, got {input.dim()}D instead"
)
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
h_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
real_hidden_size,
dtype=input.dtype,
device=input.device,
)
c_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
hx = (h_zeros, c_zeros)
self.check_forward_args(input, hx, batch_sizes)
else:
if is_batched:
if hx[0].dim() != 3 or hx[1].dim() != 3:
msg = (
"For batched 3-D input, hx and cx should "
f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
)
raise RuntimeError(msg)
else:
if hx[0].dim() != 2 or hx[1].dim() != 2:
msg = (
"For unbatched 2-D input, hx and cx should "
f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
)
raise RuntimeError(msg)
hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
self.check_forward_args(input, hx, batch_sizes)
hx = self.permute_hidden(hx, sorted_indices)
if batch_sizes is None:
result = _VF.lstm(
input,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first,
)
else:
result = _VF.lstm(
input,
batch_sizes,
hx,
self._flat_weights, # type: ignore[arg-type]
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
)
output = result[0]
hidden = result[1:]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(
output,
batch_sizes,
sorted_indices,
unsorted_indices,
)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
if not is_batched: # type: ignore[possibly-undefined]
output = output.squeeze(batch_dim) # type: ignore[possibly-undefined]
hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
return output, self.permute_hidden(hidden, unsorted_indices)
| LSTM |
python | astropy__astropy | astropy/logger.py | {
"start": 1656,
"end": 4377
} | class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.logger`.
"""
log_level = _config.ConfigItem(
"INFO",
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are ``'DEBUG'``, "
"``'INFO'``, ``'WARNING'``, ``'ERROR'``.",
)
log_warnings = _config.ConfigItem(True, "Whether to log `warnings.warn` calls.")
log_exceptions = _config.ConfigItem(
False, "Whether to log exceptions before raising them."
)
log_to_file = _config.ConfigItem(
False, "Whether to always log messages to a log file."
)
log_file_path = _config.ConfigItem(
"",
"The file to log messages to. If empty string is given, "
"it defaults to a file ``'astropy.log'`` in "
"the astropy config directory.",
)
log_file_level = _config.ConfigItem(
"INFO", "Threshold for logging messages to `log_file_path`."
)
log_file_format = _config.ConfigItem(
"%(asctime)r, %(origin)r, %(levelname)r, %(message)r",
"Format for log file entries.",
)
log_file_encoding = _config.ConfigItem(
"",
"The encoding (e.g., UTF-8) to use for the log file. If empty string "
"is given, it defaults to the platform-preferred encoding.",
)
conf = Conf()
def _init_log():
"""Initializes the Astropy log--in most circumstances this is called
automatically when importing astropy.
"""
global log
orig_logger_cls = logging.getLoggerClass()
logging.setLoggerClass(AstropyLogger)
try:
log = logging.getLogger("astropy")
log._set_defaults()
finally:
logging.setLoggerClass(orig_logger_cls)
return log
def _teardown_log():
"""Shut down exception and warning logging (if enabled) and clear all
Astropy loggers from the logging module's cache.
This involves poking some logging module internals, so much if it is 'at
your own risk' and is allowed to pass silently if any exceptions occur.
"""
global log
if log.exception_logging_enabled():
log.disable_exception_logging()
if log.warnings_logging_enabled():
log.disable_warnings_logging()
del log
# Now for the fun stuff...
try:
logging._acquireLock()
try:
loggerDict = logging.Logger.manager.loggerDict
for key in loggerDict.keys():
if key == "astropy" or key.startswith("astropy."):
del loggerDict[key]
finally:
logging._releaseLock()
except Exception:
pass
Logger = logging.getLoggerClass()
| Conf |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 10173,
"end": 10655
} | class ____:
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
monotonic = Series(np.arange(1000000), index=idx)
return monotonic
def time_getitem_scalar(self, monotonic):
monotonic[80000]
def time_loc_scalar(self, monotonic):
monotonic.loc[80000]
def time_getitem_list(self, monotonic):
monotonic[80000:]
def time_loc_list(self, monotonic):
monotonic.loc[80000:]
| IntervalIndexing |
python | ray-project__ray | python/ray/tests/unit/test_runtime_env_validation.py | {
"start": 10846,
"end": 12709
} | class ____:
def test_validate_pip(self, set_runtime_env_plugin_schemas):
runtime_env = RuntimeEnv()
runtime_env.set("pip", {"packages": ["requests"], "pip_check": True})
with pytest.raises(jsonschema.exceptions.ValidationError, match="pip_check"):
runtime_env.set("pip", {"packages": ["requests"], "pip_check": "1"})
runtime_env["pip"] = {"packages": ["requests"], "pip_check": True}
with pytest.raises(jsonschema.exceptions.ValidationError, match="pip_check"):
runtime_env["pip"] = {"packages": ["requests"], "pip_check": "1"}
def test_validate_working_dir(self, set_runtime_env_plugin_schemas):
runtime_env = RuntimeEnv()
runtime_env.set("working_dir", "https://abc/file.zip")
with pytest.raises(jsonschema.exceptions.ValidationError, match="working_dir"):
runtime_env.set("working_dir", ["https://abc/file.zip"])
runtime_env["working_dir"] = "https://abc/file.zip"
with pytest.raises(jsonschema.exceptions.ValidationError, match="working_dir"):
runtime_env["working_dir"] = ["https://abc/file.zip"]
def test_validate_test_env_1(self, set_runtime_env_plugin_schemas):
runtime_env = RuntimeEnv()
runtime_env.set("test_env_1", {"array": ["123"], "bool": True})
with pytest.raises(jsonschema.exceptions.ValidationError, match="bool"):
runtime_env.set("test_env_1", {"array": ["123"], "bool": "1"})
def test_validate_test_env_2(self, set_runtime_env_plugin_schemas):
runtime_env = RuntimeEnv()
runtime_env.set("test_env_2", "123")
with pytest.raises(jsonschema.exceptions.ValidationError, match="test_env_2"):
runtime_env.set("test_env_2", ["123"])
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
| TestValidateByJsonSchema |
python | walkccc__LeetCode | solutions/3014. Minimum Number of Pushes to Type Word I/3014.py | {
"start": 0,
"end": 198
} | class ____:
def minimumPushes(self, word: str) -> int:
freqs = sorted(collections.Counter(word).values(), reverse=True)
return sum(freq * (i // 8 + 1) for i, freq in enumerate(freqs))
| Solution |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 63057,
"end": 66420
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("uk_UA")
Faker.seed(0)
self.provider = UkUAProvider
self.translit = UkUATranslit
def test_male_first_names(self):
for _ in range(100):
res = self.fake.first_name_male()
assert res in self.provider.first_names_male
def test_female_first_names(self):
for _ in range(100):
res = self.fake.first_name_female()
assert res in self.provider.first_names_female
def test_male_last_names(self):
for _ in range(100):
res = self.fake.last_name_male()
assert res in self.provider.last_names_male
def test_female_last_names(self):
for _ in range(100):
res = self.fake.last_name_female()
assert res in self.provider.last_names_female
def test_middle_names(self):
for _ in range(100):
res = self.fake.middle_name()
assert res in self.provider.middle_names
def test_male_middle_names(self):
for _ in range(100):
res = self.fake.middle_name_male()
assert res in self.provider.middle_names_male
def test_female_middle_names(self):
for _ in range(100):
res = self.fake.middle_name_female()
assert res in self.provider.middle_names_female
def test_language_name(self):
for _ in range(100):
language_name = self.fake.language_name()
assert language_name in self.provider.language_names
def test_transliteration(self):
assert self.translit("Сергій") == "Serhii"
assert self.translit("Лілія") == "Liliia"
assert self.translit("Яся") == "Yasia"
assert self.translit("Демʼян") == "Demian"
assert self.translit("Марʼяна") == "Mariana"
assert (
self.translit("абвгґдеєжзиіїйклмнопрстуфхцчшщьюяєʼ'-") == "abvhgdeiezhzyiiiklmnoprstufkhtschshshchiuiaie'-"
)
assert self.translit("АБВГҐДЕЄЖЗИІЇЙКЛМНОПРСТУФХЦЧШЩЬЮЯ") == "ABVHGDEYeZhZYIYiYKLMNOPRSTUFKhTsChShShchYuYa"
def test_full_name_male(self):
for _ in range(10):
res = self.fake.full_name(gender="M")
last_name, first_name, middle_name = res.split(" ")
assert last_name in self.provider.last_names_male
assert first_name in self.provider.first_names_male
assert middle_name in self.provider.middle_names_male
def test_full_name_female(self):
for _ in range(1000):
res = self.fake.full_name(gender="F")
last_name, first_name, middle_name = res.split(" ")
assert last_name in self.provider.last_names_female
assert first_name in self.provider.first_names_female
assert middle_name in self.provider.middle_names_female
def test_full_name(self):
for _ in range(10):
res = self.fake.full_name()
last_name, first_name, middle_name = res.split(" ")
assert last_name in self.provider.last_names
assert first_name in self.provider.first_names
assert middle_name in self.provider.middle_names
def test_short_full_name(self):
res = self.fake.full_name(short=True)
assert res.count(".") == 2
assert res.count(" ") == 1
| TestUkUa |
python | openai__gym | gym/error.py | {
"start": 1382,
"end": 1484
} | class ____(Error):
"""Raised when the user has not installed a dependency."""
| DependencyNotInstalled |
python | kamyu104__LeetCode-Solutions | Python/two-sum.py | {
"start": 29,
"end": 781
} | class ____(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
lookup = {}
for i, num in enumerate(nums):
if target - num in lookup:
return [lookup[target - num], i]
lookup[num] = i
def twoSum2(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in nums:
j = target - i
tmp_nums_start_index = nums.index(i) + 1
tmp_nums = nums[tmp_nums_start_index:]
if j in tmp_nums:
return [nums.index(i), tmp_nums_start_index + tmp_nums.index(j)]
| Solution |
python | openai__openai-python | src/openai/types/batch_usage.py | {
"start": 413,
"end": 521
} | class ____(BaseModel):
reasoning_tokens: int
"""The number of reasoning tokens."""
| OutputTokensDetails |
python | weaviate__weaviate-python-client | weaviate/collections/aggregations/near_object/async_.py | {
"start": 199,
"end": 270
} | class ____(_NearObjectExecutor[ConnectionAsync]):
pass
| _NearObjectAsync |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 109557,
"end": 113095
} | class ____(PallasBaseTest):
@parameterized.parameters((2,), (5,), (6,), (7,))
def test_checkify_with_scalar_prefetch(self, threshold):
def body(scalar_ref, x_ref, o_ref):
scalar = scalar_ref[pl.program_id(0)]
o_ref[...] = x_ref[...]
checkify.check(scalar < threshold, 'failed on value {x}', x=scalar)
s = jnp.array([4, 3, 2, 6, 3, 5, 2, 7], jnp.int32)
x = jnp.arange(8 * 8 * 128, dtype=jnp.int32).reshape((8 * 8, 128))
def _x_transform(i, s_ref):
return (s_ref[i], 0)
pallas_call = self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(x.shape, jnp.int32),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=1,
in_specs=[
pl.BlockSpec((x.shape[0] // 8, x.shape[1]), _x_transform),
],
out_specs=pl.BlockSpec(
(x.shape[0] // 8, x.shape[1]), lambda i, _: (i, 0)
),
grid=8,
),
)
checked_call = checkify.checkify(pallas_call)
err, out = checked_call(s, x)
expected_error_value = s[jnp.argmax(s >= threshold)]
with self.assertRaisesRegex(
checkify.JaxRuntimeError, f'failed on value {expected_error_value}'):
err.throw()
np.testing.assert_allclose(out, x.reshape((8, 8, -1))[s].reshape(x.shape))
def test_checkify_with_scratch(self):
def body(x_ref, o_ref, scratch_ref):
scratch_ref[...] = x_ref[...]
o_ref[...] = scratch_ref[...]
all_nequal = ~jnp.all(o_ref[...] == x_ref[...])
checkify.check(all_nequal, 'x_ref equals o_ref id=({x}, {y})',
x=pl.program_id(0), y=pl.program_id(1))
x = jax.random.uniform(jax.random.key(0), (128, 512), dtype=jnp.float32)
pallas_call = self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(x.shape, jnp.float32),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[
pl.BlockSpec((32, 128), lambda i, j: (i, j)),
],
out_specs=pl.BlockSpec((32, 128), lambda i, j: (i, j)),
scratch_shapes=[pltpu.VMEM((32, 128), dtype=jnp.float32)],
grid=(4, 4),
),
)
checked_call = checkify.checkify(pallas_call)
err, out = checked_call(x)
with self.assertRaisesRegex(
checkify.JaxRuntimeError, r'x_ref equals o_ref id=\(0, 0\)'):
err.throw()
np.testing.assert_allclose(out, x)
@parameterized.parameters((4,), (9,))
def test_checkify_with_dynamic_grid(self, iteration):
grid_size = 4
shape = (8, 128)
result_ty = jax.ShapeDtypeStruct(shape, jnp.float32)
def kernel(y_ref):
@pl.when(pl.program_id(0) == 0)
def _init():
y_ref[...] = jnp.zeros_like(y_ref)
y_ref[...] += 1
@pl.when(pl.program_id(0) == iteration)
def _():
checkify.check(False, f"error on iteration {iteration}")
@jax.jit
def dynamic_kernel(steps):
pallas_call = self.pallas_call(
kernel,
grid=(steps * 2,),
out_specs=pl.BlockSpec(shape, lambda i: (0, 0)),
out_shape=result_ty,
)
return checkify.checkify(pallas_call)()
err, result = dynamic_kernel(jnp.int32(grid_size))
if iteration < grid_size * 2:
with self.assertRaisesRegex(
checkify.JaxRuntimeError, f"error on iteration {iteration}"):
err.throw()
np.testing.assert_array_equal(
result, np.full(shape, grid_size * 2.0, np.float32)
)
| PallasCallTPUCheckifyTest |
python | huggingface__transformers | src/transformers/models/sew_d/modeling_sew_d.py | {
"start": 35859,
"end": 36447
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = nn.Dropout(config.activation_dropout)
self.config = config
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
| SEWDOutput |
python | doocs__leetcode | solution/0300-0399/0312.Burst Balloons/Solution.py | {
"start": 0,
"end": 403
} | class ____:
def maxCoins(self, nums: List[int]) -> int:
n = len(nums)
arr = [1] + nums + [1]
f = [[0] * (n + 2) for _ in range(n + 2)]
for i in range(n - 1, -1, -1):
for j in range(i + 2, n + 2):
for k in range(i + 1, j):
f[i][j] = max(f[i][j], f[i][k] + f[k][j] + arr[i] * arr[k] * arr[j])
return f[0][-1]
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_http_ingress_rule_value.py | {
"start": 383,
"end": 3751
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'paths': 'list[V1HTTPIngressPath]'
}
attribute_map = {
'paths': 'paths'
}
def __init__(self, paths=None, local_vars_configuration=None): # noqa: E501
"""V1HTTPIngressRuleValue - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._paths = None
self.discriminator = None
self.paths = paths
@property
def paths(self):
"""Gets the paths of this V1HTTPIngressRuleValue. # noqa: E501
paths is a collection of paths that map requests to backends. # noqa: E501
:return: The paths of this V1HTTPIngressRuleValue. # noqa: E501
:rtype: list[V1HTTPIngressPath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""Sets the paths of this V1HTTPIngressRuleValue.
paths is a collection of paths that map requests to backends. # noqa: E501
:param paths: The paths of this V1HTTPIngressRuleValue. # noqa: E501
:type: list[V1HTTPIngressPath]
"""
if self.local_vars_configuration.client_side_validation and paths is None: # noqa: E501
raise ValueError("Invalid value for `paths`, must not be `None`") # noqa: E501
self._paths = paths
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HTTPIngressRuleValue):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1HTTPIngressRuleValue):
return True
return self.to_dict() != other.to_dict()
| V1HTTPIngressRuleValue |
python | tensorflow__tensorflow | tensorflow/python/autograph/operators/data_structures_test.py | {
"start": 1132,
"end": 7298
} | class ____(test.TestCase):
def test_new_list_empty(self):
l = data_structures.new_list()
# Can't evaluate an empty list.
# TODO(mdan): sess.run should allow tf.variant maybe?
self.assertTrue(isinstance(l, tensor.Tensor))
def test_new_list_tensor(self):
l = data_structures.new_list([3, 4, 5])
self.assertAllEqual(l, [3, 4, 5])
def test_tf_tensor_list_new(self):
l = data_structures.tf_tensor_list_new([3, 4, 5])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
def test_tf_tensor_list_new_empty(self):
l = data_structures.tf_tensor_list_new([],
element_dtype=dtypes.int32,
element_shape=())
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [])
def test_tf_tensor_list_new_from_tensor(self):
l = data_structures.tf_tensor_list_new(constant_op.constant([3, 4, 5]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
@test_util.run_deprecated_v1
def test_tf_tensor_list_new_illegal_input(self):
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4.0])
# TODO(mdan): It might make more sense to type cast in this case.
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
# Tensor lists do support heterogeneous lists.
self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new(
constant_op.constant([1, 2, 3]), element_shape=[1])
def test_tf_tensor_array_new(self):
l = data_structures.tf_tensor_array_new([3, 4, 5])
t = l.stack()
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
def test_tf_tensor_array_new_illegal_input(self):
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4.0])
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, [4, 5]])
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([], element_shape=(2,))
# TAs can infer the shape.
self.assertIsNot(
data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
None)
def test_append_tensor_list(self):
l = data_structures.new_list()
x = constant_op.constant([1, 2, 3])
l = data_structures.list_append(l, x)
t = list_ops.tensor_list_stack(l, element_dtype=x.dtype)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [[1, 2, 3]])
@test_util.run_deprecated_v1
def test_append_tensorarray(self):
l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
l1 = data_structures.list_append(l, 1)
l2 = data_structures.list_append(l1, 2)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(l1.stack()), [1])
self.assertAllEqual(self.evaluate(l2.stack()), [1, 2])
def test_append_python(self):
l = []
self.assertAllEqual(data_structures.list_append(l, 1), [1])
self.assertAllEqual(data_structures.list_append(l, 2), [1, 2])
def test_pop_tensor_list(self):
initial_list = constant_op.constant([[1, 2], [3, 4]])
elem_shape = constant_op.constant([2])
l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
opts = data_structures.ListPopOpts(
element_dtype=initial_list.dtype,
element_shape=(2,))
with self.assertRaises(NotImplementedError):
data_structures.list_pop(l, 0, opts)
with self.cached_session() as sess:
l, x = data_structures.list_pop(l, None, opts)
self.assertAllEqual(self.evaluate(x), [3, 4])
t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
self.assertAllEqual(self.evaluate(t), [[1, 2]])
def test_pop_python(self):
l = [1, 2, 3]
opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())
self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1], 2))
def test_stack_tensor_list(self):
initial_list = constant_op.constant([[1, 2], [3, 4]])
elem_shape = constant_op.constant([2])
l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
opts = data_structures.ListStackOpts(
element_dtype=initial_list.dtype, original_call=None)
with self.cached_session() as sess:
t = data_structures.list_stack(l, opts)
self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))
@test_util.run_deprecated_v1
def test_stack_tensor_list_empty(self):
l = list_ops.empty_tensor_list(
element_shape=None, element_dtype=dtypes.variant)
opts = data_structures.ListStackOpts(
element_dtype=dtypes.int32, original_call=None)
# TODO(mdan): Allow stacking empty lists if the dtype and shape are known.
with self.assertRaises(ValueError):
data_structures.list_stack(l, opts)
def test_stack_fallback(self):
def dummy_function(l):
# Lazy person's mock: just transform the argument in a way in which we
# can check that this function was indeed called.
return [x * 2 for x in l]
opts = data_structures.ListStackOpts(
element_dtype=None, original_call=dummy_function)
self.assertAllEqual(data_structures.list_stack([1, 2], opts), [2, 4])
if __name__ == '__main__':
test.main()
| ListTest |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 92893,
"end": 97022
} | class ____(ModelOutput):
"""
Base class for sequence-to-sequence spectrogram outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Spectrogram generation loss.
spectrogram (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_bins)`):
The predicted spectrogram.
past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
spectrogram: Optional[torch.FloatTensor] = None
past_key_values: Optional[EncoderDecoderCache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| Seq2SeqSpectrogramOutput |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 41083,
"end": 41423
} | class ____(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
| fftw3_info |
python | django__django | tests/forms_tests/field_tests/test_floatfield.py | {
"start": 5806,
"end": 6473
} | class ____(SeleniumTestCase):
available_apps = ["forms_tests"]
def test_float_field_rendering_passes_client_side_validation(self):
"""
Rendered widget allows non-integer value with the client-side
validation.
"""
from selenium.webdriver.common.by import By
self.selenium.get(self.live_server_url + reverse("form_view"))
number_input = self.selenium.find_element(By.ID, "id_number")
number_input.send_keys("0.5")
is_valid = self.selenium.execute_script(
"return document.getElementById('id_number').checkValidity()"
)
self.assertTrue(is_valid)
| FloatFieldHTMLTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_column05.py | {
"start": 315,
"end": 1744
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_column05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
bold_italic = workbook.add_format({"bold": 1, "italic": 1})
chart.axis_ids = [68311296, 69198208]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write("A1", "Foo", italic)
worksheet.write("B1", "Bar", bold)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.set_row(12, None, italic)
worksheet.set_column("F:F", None, bold)
worksheet.write("F13", None, bold_italic)
chart.add_series({"values": "=Sheet1!$A$2:$A$6"})
chart.add_series({"values": "=Sheet1!$B$2:$B$6"})
chart.add_series({"values": "=Sheet1!$C$2:$C$6"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py | {
"start": 226,
"end": 6820
} | class ____(TestCase):
def setUp(self):
self.validator = MagicMock(wraps=RequestValidator())
self.validator.client_authentication_required.return_value = True
self.validator.authenticate_client.return_value = True
self.validator.revoke_token.return_value = True
self.endpoint = RevocationEndpoint(self.validator)
self.uri = 'https://example.com/revoke_token'
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
self.resp_h = {
'Cache-Control': 'no-store',
'Content-Type': 'application/json',
'Pragma': 'no-cache'
}
def test_revoke_token(self):
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type)])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, '')
self.assertEqual(s, 200)
# don't specify token_type_hint
body = urlencode([('token', 'foo')])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, '')
self.assertEqual(s, 200)
def test_revoke_token_client_authentication_failed(self):
self.validator.authenticate_client.return_value = False
body = urlencode([('token', 'foo'),
('token_type_hint', 'access_token')])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
"WWW-Authenticate": 'Bearer error="invalid_client"'
})
self.assertEqual(loads(b)['error'], 'invalid_client')
self.assertEqual(s, 401)
def test_revoke_token_public_client_authentication(self):
self.validator.client_authentication_required.return_value = False
self.validator.authenticate_client_id.return_value = True
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type)])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, '')
self.assertEqual(s, 200)
def test_revoke_token_public_client_authentication_failed(self):
self.validator.client_authentication_required.return_value = False
self.validator.authenticate_client_id.return_value = False
body = urlencode([('token', 'foo'),
('token_type_hint', 'access_token')])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
"WWW-Authenticate": 'Bearer error="invalid_client"'
})
self.assertEqual(loads(b)['error'], 'invalid_client')
self.assertEqual(s, 401)
def test_revoke_with_callback(self):
endpoint = RevocationEndpoint(self.validator, enable_jsonp=True)
callback = 'package.hello_world'
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type),
('callback', callback)])
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, callback + '();')
self.assertEqual(s, 200)
def test_revoke_unsupported_token(self):
endpoint = RevocationEndpoint(self.validator,
supported_token_types=['access_token'])
body = urlencode([('token', 'foo'),
('token_type_hint', 'refresh_token')])
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, self.resp_h)
self.assertEqual(loads(b)['error'], 'unsupported_token_type')
self.assertEqual(s, 400)
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body='')
self.assertEqual(h, self.resp_h)
self.assertEqual(loads(b)['error'], 'invalid_request')
self.assertEqual(s, 400)
def test_revoke_invalid_request_method(self):
endpoint = RevocationEndpoint(self.validator,
supported_token_types=['access_token'])
test_methods = ['GET', 'pUt', 'dEleTe', 'paTcH']
test_methods = test_methods + [x.lower() for x in test_methods] + [x.upper() for x in test_methods]
for method in test_methods:
body = urlencode([('token', 'foo'),
('token_type_hint', 'refresh_token')])
h, b, s = endpoint.create_revocation_response(self.uri,
http_method = method, headers=self.headers, body=body)
self.assertEqual(h, self.resp_h)
self.assertEqual(loads(b)['error'], 'invalid_request')
self.assertIn('Unsupported request method', loads(b)['error_description'])
self.assertEqual(s, 400)
def test_revoke_bad_post_request(self):
endpoint = RevocationEndpoint(self.validator,
supported_token_types=['access_token'])
for param in ['token', 'secret', 'code', 'foo']:
uri = 'http://some.endpoint?' + urlencode([(param, 'secret')])
body = urlencode([('token', 'foo'),
('token_type_hint', 'access_token')])
h, b, s = endpoint.create_revocation_response(uri,
headers=self.headers, body=body)
self.assertEqual(h, self.resp_h)
self.assertEqual(loads(b)['error'], 'invalid_request')
self.assertIn('query parameters are not allowed', loads(b)['error_description'])
self.assertEqual(s, 400)
| RevocationEndpointTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/recursiveTypeAlias11.py | {
"start": 469,
"end": 517
} | class ____(ClassB1, Generic[T2]):
pass
| ClassB2 |
python | pytorch__pytorch | test/distributed/tensor/test_utils.py | {
"start": 5729,
"end": 18575
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 8
def _compute_start_end_offsets(self, global_offset, local_size, n_dim):
offset = []
for i in range(n_dim):
offset.append(((global_offset[i]), (global_offset[i] + local_size[i])))
return offset
@with_comms
def test_compute_global_tensor_shape_1D(self):
one_d_placements = [[Shard(1)], [Shard(0)], [Replicate()]]
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
for placements in one_d_placements:
if isinstance(placements[0], Shard):
uneven_dim = list(range(self.world_size))
local_shape = (
torch.Size([5, uneven_dim[self.rank]])
if placements[0].dim == 1
else torch.Size([uneven_dim[self.rank], 5])
)
expected_global_shape = (
torch.Size([5, sum(uneven_dim)])
if placements[0].dim == 1
else torch.Size([sum(uneven_dim), 5])
)
else:
expected_global_shape = torch.Size([5, 5])
local_shape = torch.Size([5, 5])
global_shape = compute_global_tensor_shape(
local_shape, device_mesh, placements
)
self.assertEqual(global_shape, expected_global_shape)
@with_comms
def test_compute_global_tensor_shape_1D_invalid_shape(self):
one_d_placement = [Shard(1)]
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
uneven_dim = list(range(self.world_size))
local_shape = (
torch.Size([5, uneven_dim[self.rank]])
if self.rank % 2 == 0
else torch.Size([6, uneven_dim[self.rank]])
)
with self.assertRaisesRegex(
RuntimeError,
"Non-sharded dimensions should have identical size across ranks.",
):
_ = compute_global_tensor_shape(
local_shape,
device_mesh,
one_d_placement,
)
@with_comms
def test_compute_global_tensor_shape_failure_2D(self):
placement_2D = [Shard(0), Shard(1)]
device_mesh_2D = init_device_mesh(self.device_type, (2, 2))
with self.assertRaisesRegex(
NotImplementedError,
"compute_global_tensor_shape only supports 1 placement for now.",
):
_ = compute_global_tensor_shape(
torch.Size([2, 2]),
device_mesh_2D,
placement_2D,
)
placement_1D = [Shard(0)]
with self.assertRaisesRegex(
RuntimeError,
"Expected one placement per mesh dim",
):
_ = compute_global_tensor_shape(
torch.Size([2, 2]),
device_mesh_2D,
placement_1D,
)
@with_comms
def test_compute_local_shape_and_global_offset_1D(self):
one_d_placements = [[Shard(0)], [Replicate()]]
device_mesh = init_device_mesh(self.device_type, (self.world_size,))
for placements in one_d_placements:
# When the placements is [Shard(0)], we test for three different scenarios:
# 1) sharding resulting in empty shards on all or some of the ranks
# 2) sharding resulting in shards of different size across different ranks
# 3) sharding resulting in non-empty shards of same size across all ranks
for size in range(self.world_size * 2 + 1):
global_tensor = torch.arange(size)
global_shape = global_tensor.size()
dtensor = distribute_tensor(global_tensor, device_mesh, placements)
local_size, global_offset = compute_local_shape_and_global_offset(
global_shape, device_mesh, placements
)
dim = self._compute_start_end_offsets(global_offset, local_size, 1)
dim0_start, dim0_end = dim[0][0], dim[0][1]
# Check the local tensor of dtensor is exactly the same
# if we slice the global_tensor with local_size and global_offset
self.assertEqual(
dtensor.to_local(),
global_tensor[dim0_start:dim0_end],
)
@with_comms
def test_compute_local_shape_and_global_offset_2D(self):
two_d_placements_options = [Shard(0), Shard(1), Replicate()]
# Generating 6 two-d placements combinations
two_d_placements = list(
itertools.combinations_with_replacement(two_d_placements_options, 2)
)
# mesh: 2 * 4
device_mesh = init_device_mesh(self.device_type, (2, 4))
for placements in two_d_placements:
for dim_0_size in range(1, 9):
nelem = 64 // dim_0_size * dim_0_size
global_tensor = torch.arange(nelem).view(dim_0_size, -1)
global_shape = global_tensor.size()
dtensor = distribute_tensor(global_tensor, device_mesh, placements)
local_size, global_offset = compute_local_shape_and_global_offset(
global_shape, device_mesh, placements
)
dim = self._compute_start_end_offsets(global_offset, local_size, 2)
dim0_start, dim0_end = dim[0][0], dim[0][1]
dim1_start, dim1_end = dim[1][0], dim[1][1]
# Check the local tensor of dtensor is exactly the same
# if we slice the global_tensor with local_size and global_offset
self.assertEqual(
dtensor.to_local(),
global_tensor[dim0_start:dim0_end, dim1_start:dim1_end],
)
@with_comms
def test_fsdp_tp_meta_compute(self):
# FSDP + TP sharding
tp_size = 2
dp_size = self.world_size // tp_size
global_mesh = init_device_mesh(
self.device_type, (dp_size, tp_size), mesh_dim_names=("dp", "tp")
)
# local shard shape is [2, 2]
global_tensor_shape = torch.Size([2 * self.world_size, 2])
placements = [_StridedShard(0, split_factor=tp_size), Shard(0)]
local_shape, global_offset = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
assert global_mesh.get_coordinate is not None
dp_rank = global_mesh.get_local_rank("dp")
tp_rank = global_mesh.get_local_rank("tp")
shard_idx_on_dim_0 = tp_rank * dp_size + dp_rank
expected_local_shape = (2, 2)
expected_global_offset = (shard_idx_on_dim_0 * 2, 0)
self.assertEqual(local_shape, expected_local_shape)
self.assertEqual(global_offset, expected_global_offset)
@with_comms
def test_uneven_fsdp_tp_meta_compute(self):
# FSDP + TP uneven sharding
tp_size = 2
dp_size = self.world_size // tp_size
global_mesh = init_device_mesh(
self.device_type, (dp_size, tp_size), mesh_dim_names=("dp", "tp")
)
global_tensor_shape = torch.Size([15, 5])
placements = [_StridedShard(0, split_factor=tp_size), Shard(0)]
local_shape, global_offset = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
rank = global_mesh.get_rank()
expected_shapes = [2, 2, 2, 2, 2, 2, 2, 1]
expected_offsets = [0, 8, 2, 10, 4, 12, 6, 14]
self.assertEqual(local_shape[0], expected_shapes[rank])
self.assertEqual(global_offset[0], expected_offsets[rank])
@with_comms
def test_hsdp_tp_meta_compute(self):
# HSDP + TP sharding
tp_size = 2
dp_shard_size = 2
dp_replic_size = self.world_size // (dp_shard_size * tp_size)
global_mesh = init_device_mesh(
self.device_type,
(dp_replic_size, dp_shard_size, tp_size),
mesh_dim_names=("dp_replic", "dp_shard", "tp"),
)
# local shard shape is [2, 2]
global_tensor_shape = torch.Size([2 * dp_shard_size * tp_size, 2])
placements = [Replicate(), _StridedShard(0, split_factor=tp_size), Shard(0)]
local_shape, global_offset = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
assert global_mesh.get_coordinate is not None
dp_shard_rank = global_mesh.get_local_rank("dp_shard")
tp_rank = global_mesh.get_local_rank("tp")
shard_idx_on_dim_0 = tp_rank * dp_shard_size + dp_shard_rank
expected_local_shape = (2, 2)
expected_global_offset = (shard_idx_on_dim_0 * 2, 0)
self.assertEqual(local_shape, expected_local_shape)
self.assertEqual(global_offset, expected_global_offset)
# TODO: remove this test once we support general meta compute on strided sharding
@with_comms
def test_strided_sharding_assumption_in_meta_compute(self):
# current ``compute_local_shape_and_global_offset`` does not allow Shard(i)
# placement to appear after the strided sharding part has ended. This test
# check that ``compute_local_shape_and_global_offset`` does not allow placements
# that violate the assumption and does not forbid the allowed ones.
# Test 0: 2-D mesh
mesh_size_0 = 2
mesh_size_1 = self.world_size // mesh_size_0
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1),
mesh_dim_names=("mesh-0", "mesh-1"),
)
global_tensor_shape = torch.Size([2 * self.world_size, 2 * self.world_size])
for shard_dim in [0, 1]:
placements = [
_StridedShard(shard_dim, split_factor=mesh_size_1),
Shard(shard_dim),
]
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# Test 1: 3-D mesh
mesh_size_0 = 2
mesh_size_1 = 2
mesh_size_2 = self.world_size // (mesh_size_0 * mesh_size_1)
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1, mesh_size_2),
mesh_dim_names=("mesh-0", "mesh-1", "mesh-2"),
)
# legal placements: Shard() appear after the strided part but it's on another
# tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
Shard(0),
Shard(1),
]
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# illegal placements: Shard() appear after the strided part and it's on the
# same tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
Shard(0),
Shard(0),
]
with self.assertRaisesRegex(NotImplementedError, "the strided part has ended"):
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
# Test 2: 4-D mesh
mesh_size_0 = 1
mesh_size_1 = 2
mesh_size_2 = 2
mesh_size_3 = self.world_size // (mesh_size_0 * mesh_size_1 * mesh_size_2)
global_mesh = init_device_mesh(
self.device_type,
(mesh_size_0, mesh_size_1, mesh_size_2, mesh_size_3),
mesh_dim_names=("mesh-0", "mesh-1", "mesh-2", "mesh-3"),
)
# legal placements: Shard() appear after the strided part but it's on another
# tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
_StridedShard(1, split_factor=mesh_size_3),
Shard(0),
Shard(1),
]
local_shape, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
expected_local_shape = (
2 * mesh_size_1 * mesh_size_3,
2 * mesh_size_0 * mesh_size_2,
)
self.assertEqual(local_shape, expected_local_shape)
# illegal placements: Shard() appear after the strided part and it's on the
# same tensor dimension.
placements = [
_StridedShard(0, split_factor=mesh_size_1),
_StridedShard(1, split_factor=mesh_size_3),
Shard(0),
Shard(0),
]
with self.assertRaisesRegex(NotImplementedError, "the strided part has ended"):
_, _ = compute_local_shape_and_global_offset(
global_tensor_shape, global_mesh, placements
)
| UtilTest |
python | getsentry__sentry | tests/sentry/integrations/aws_lambda/test_utils.py | {
"start": 1480,
"end": 1756
} | class ____(TestCase):
def test_simple(self) -> None:
fn = {
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaB",
}
assert get_latest_layer_version(fn) == 3
| GetLatestLayerVersionTest |
python | RaRe-Technologies__gensim | gensim/test/test_scripts.py | {
"start": 4208,
"end": 6158
} | class ____(unittest.TestCase):
def setUp(self):
self.datapath = datapath('word2vec_pre_kv_c')
self.output_folder = get_tmpfile('w2v2t_test')
self.metadata_file = self.output_folder + '_metadata.tsv'
self.tensor_file = self.output_folder + '_tensor.tsv'
self.vector_file = self.output_folder + '_vector.tsv'
def test_conversion(self):
word2vec2tensor(word2vec_model_path=self.datapath, tensor_filename=self.output_folder)
with utils.open(self.metadata_file, 'rb') as f:
metadata = f.readlines()
with utils.open(self.tensor_file, 'rb') as f:
vectors = f.readlines()
# check if number of words and vector size in tensor file line up with word2vec
with utils.open(self.datapath, 'rb') as f:
first_line = f.readline().strip()
number_words, vector_size = map(int, first_line.split(b' '))
self.assertTrue(len(metadata) == len(vectors) == number_words,
('Metadata file %s and tensor file %s imply different number of rows.'
% (self.metadata_file, self.tensor_file)))
# grab metadata and vectors from written file
metadata = [word.strip() for word in metadata]
vectors = [vector.replace(b'\t', b' ') for vector in vectors]
# get the originaly vector KV model
orig_model = KeyedVectors.load_word2vec_format(self.datapath, binary=False)
# check that the KV model and tensor files have the same values key-wise
for word, vector in zip(metadata, vectors):
word_string = word.decode("utf8")
vector_string = vector.decode("utf8")
vector_array = np.array(list(map(float, vector_string.split())))
np.testing.assert_almost_equal(orig_model[word_string], vector_array, decimal=5)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| TestWord2Vec2Tensor |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/dep_snapshot.py | {
"start": 2007,
"end": 2606
} | class ____(
NamedTuple(
"_DependencyStructureSnapshot",
[("node_invocation_snaps", Sequence["NodeInvocationSnap"])],
)
):
def __new__(cls, node_invocation_snaps: Sequence["NodeInvocationSnap"]):
return super().__new__(
cls,
sorted(
check.sequence_param(
node_invocation_snaps, "node_invocation_snaps", of_type=NodeInvocationSnap
),
key=lambda si: si.node_name,
),
)
# Not actually serialized. Used within the dependency index
| DependencyStructureSnapshot |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC402_numpy.py | {
"start": 343,
"end": 1618
} | class ____:
# OK
def foo(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
Yields
-------
str
A string
"""
yield 'test'
# DOC402
def bar(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
"""
yield 'test'
import typing
# OK
def foo() -> typing.Generator[None, None, None]:
"""
Do something
"""
yield None
# OK
def foo() -> typing.Generator[None, None, None]:
"""
Do something
"""
yield
# DOC402
def foo() -> typing.Generator[int | None, None, None]:
"""
Do something
"""
yield None
yield 1
# DOC402
def foo() -> typing.Generator[int, None, None]:
"""
Do something
"""
yield None
# OK
def foo():
"""
Do something
"""
yield None
# OK
def foo():
"""
Do something
"""
yield
# DOC402
def foo():
"""
Do something
"""
yield None
yield 1
# DOC402
def foo():
"""
Do something
"""
yield 1
yield
# DOC402
def bar() -> typing.Iterator[int | None]:
"""
Do something
"""
yield
| Bar |
python | pytorch__pytorch | test/test_tensorboard.py | {
"start": 26925,
"end": 28844
} | class ____(BaseTestCase):
@skipIfNoMatplotlib
def test_figure(self):
writer = self.createSummaryWriter()
figure, axes = plt.figure(), plt.gca()
circle1 = plt.Circle((0.2, 0.5), 0.2, color="r")
circle2 = plt.Circle((0.8, 0.5), 0.2, color="g")
axes.add_patch(circle1)
axes.add_patch(circle2)
plt.axis("scaled")
plt.tight_layout()
writer.add_figure("add_figure/figure", figure, 0, close=False)
self.assertTrue(plt.fignum_exists(figure.number))
writer.add_figure("add_figure/figure", figure, 1)
if matplotlib.__version__ != "3.3.0":
self.assertFalse(plt.fignum_exists(figure.number))
else:
print(
"Skipping fignum_exists, see https://github.com/matplotlib/matplotlib/issues/18163"
)
writer.close()
@skipIfNoMatplotlib
def test_figure_list(self):
writer = self.createSummaryWriter()
figures = []
for i in range(5):
figure = plt.figure()
plt.plot([i * 1, i * 2, i * 3], label="Plot " + str(i))
plt.xlabel("X")
plt.xlabel("Y")
plt.legend()
plt.tight_layout()
figures.append(figure)
writer.add_figure("add_figure/figure_list", figures, 0, close=False)
self.assertTrue(
all(plt.fignum_exists(figure.number) is True for figure in figures)
) # noqa: F812
writer.add_figure("add_figure/figure_list", figures, 1)
if matplotlib.__version__ != "3.3.0":
self.assertTrue(
all(plt.fignum_exists(figure.number) is False for figure in figures)
) # noqa: F812
else:
print(
"Skipping fignum_exists, see https://github.com/matplotlib/matplotlib/issues/18163"
)
writer.close()
| TestTensorBoardFigure |
python | google__jax | jax/_src/mesh.py | {
"start": 4930,
"end": 7331
} | class ____:
axis_names: tuple[MeshAxisName, ...]
shape_tuple: tuple[tuple[str, int], ...]
axis_types: tuple[AxisType, ...]
@functools.cached_property
def are_all_axes_manual(self) -> bool:
return all_axis_types_match(self.axis_types, AxisType.Manual)
@functools.cached_property
def are_all_axes_auto(self) -> bool:
return all_axis_types_match(self.axis_types, AxisType.Auto)
@functools.cached_property
def are_all_axes_explicit(self) -> bool:
return all_axis_types_match(self.axis_types, AxisType.Explicit)
@functools.cached_property
def _are_all_axes_auto_or_manual(self) -> bool:
if not self.axis_types:
return False
return all(t == AxisType.Auto or t == AxisType.Manual
for t in self.axis_types)
@functools.cached_property
def _are_all_axes_explicit_or_manual(self) -> bool:
if not self.axis_types:
return False
return all(t == AxisType.Explicit or t == AxisType.Manual
for t in self.axis_types)
@functools.cached_property
def _any_axis_manual(self) -> bool:
return any_axis_types_match(self.axis_types, AxisType.Manual)
@functools.cached_property
def _any_axis_auto(self) -> bool:
return any_axis_types_match(self.axis_types, AxisType.Auto)
@functools.cached_property
def _any_axis_explicit(self) -> bool:
return any_axis_types_match(self.axis_types, AxisType.Explicit)
@functools.cached_property
def _any_axis_auto_or_manual(self) -> bool:
if not self.axis_types:
return False
return any(t == AxisType.Auto or t == AxisType.Manual
for t in self.axis_types)
@functools.cached_property
def auto_axes(self):
return tuple(n for n, t in safe_zip(self.axis_names, self.axis_types)
if t == AxisType.Auto)
@functools.cached_property
def explicit_axes(self):
return tuple(n for n, t in safe_zip(self.axis_names, self.axis_types)
if t == AxisType.Explicit)
@functools.cached_property
def manual_axes(self):
return tuple(n for n, t in safe_zip(self.axis_names, self.axis_types)
if t == AxisType.Manual)
@functools.cached_property
def _name_to_type(self):
return dict(safe_zip(self.axis_names, self.axis_types))
def _unpicke_mesh(devices, axis_names, axis_types):
return Mesh(devices, axis_names, axis_types)
_mesh_object_dict = {} # type: ignore
| BaseMesh |
python | getsentry__sentry | src/sentry/integrations/base.py | {
"start": 4546,
"end": 5683
} | class ____(StrEnum):
MESSAGING = "messaging"
PROJECT_MANAGEMENT = "project_management"
SOURCE_CODE_MANAGEMENT = "source_code_management"
ON_CALL_SCHEDULING = "on_call_scheduling"
IDENTITY = "identity" # for identity pipelines
GENERAL = "general" # for processes that span multiple integration domains
INTEGRATION_TYPE_TO_PROVIDER = {
IntegrationDomain.MESSAGING: [
IntegrationProviderSlug.SLACK,
IntegrationProviderSlug.DISCORD,
IntegrationProviderSlug.MSTEAMS,
],
IntegrationDomain.PROJECT_MANAGEMENT: [
IntegrationProviderSlug.JIRA,
IntegrationProviderSlug.JIRA_SERVER,
],
IntegrationDomain.SOURCE_CODE_MANAGEMENT: [
IntegrationProviderSlug.GITHUB,
IntegrationProviderSlug.GITHUB_ENTERPRISE,
IntegrationProviderSlug.GITLAB,
IntegrationProviderSlug.BITBUCKET,
IntegrationProviderSlug.BITBUCKET_SERVER,
IntegrationProviderSlug.AZURE_DEVOPS,
],
IntegrationDomain.ON_CALL_SCHEDULING: [
IntegrationProviderSlug.PAGERDUTY,
IntegrationProviderSlug.OPSGENIE,
],
}
| IntegrationDomain |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 12157,
"end": 12493
} | class ____(_AttributeOverwriteMixin, ValueWrapper,
metaclass=_OverwriteMeta):
pass
def publish_method(method_name):
def decorator(func):
dct = func.__dict__.setdefault('registered_overwritten_methods', {})
dct[method_name] = func
return func
return decorator
| AttributeOverwrite |
python | gevent__gevent | src/greentest/3.9/test_subprocess.py | {
"start": 63482,
"end": 71733
} | class ____(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments that exit fast
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines
# as it depends on the timing with wide enough margins for normal situations
# but does assert that it happened "soon enough" to believe the right thing
# happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
| RunFuncTestCase |
python | sphinx-doc__sphinx | tests/test_ext_intersphinx/test_ext_intersphinx.py | {
"start": 24059,
"end": 35517
} | class ____:
"""Tests for sphinx.ext.intersphinx._strip_basic_auth()"""
def test_auth_stripped(self):
"""Basic auth creds stripped from URL containing creds"""
url = 'https://user:12345@domain.com/project/objects.inv'
expected = 'https://domain.com/project/objects.inv'
actual = _strip_basic_auth(url)
assert actual == expected
def test_no_auth(self):
"""Url unchanged if param doesn't contain basic auth creds"""
url = 'https://domain.com/project/objects.inv'
expected = 'https://domain.com/project/objects.inv'
actual = _strip_basic_auth(url)
assert actual == expected
def test_having_port(self):
"""Basic auth creds correctly stripped from URL containing creds even if URL
contains port
"""
url = 'https://user:12345@domain.com:8080/project/objects.inv'
expected = 'https://domain.com:8080/project/objects.inv'
actual = _strip_basic_auth(url)
assert actual == expected
def test_getsafeurl_authed() -> None:
"""_get_safe_url() with a url with basic auth"""
url = 'https://user:12345@domain.com/project/objects.inv'
expected = 'https://user@domain.com/project/objects.inv'
actual = _get_safe_url(url)
assert actual == expected
def test_getsafeurl_authed_having_port() -> None:
"""_get_safe_url() with a url with basic auth having port"""
url = 'https://user:12345@domain.com:8080/project/objects.inv'
expected = 'https://user@domain.com:8080/project/objects.inv'
actual = _get_safe_url(url)
assert actual == expected
def test_getsafeurl_unauthed() -> None:
"""_get_safe_url() with a url without basic auth"""
url = 'https://domain.com/project/objects.inv'
expected = 'https://domain.com/project/objects.inv'
actual = _get_safe_url(url)
assert actual == expected
def test_inspect_main_noargs(capsys):
"""inspect_main interface, without arguments"""
assert inspect_main([]) == 1
expected = (
'Print out an inventory file.\n'
'Error: must specify local path or URL to an inventory file.'
)
stdout, stderr = capsys.readouterr()
assert stdout == ''
assert stderr == expected + '\n'
def test_inspect_main_file(capsys, tmp_path):
"""inspect_main interface, with file argument"""
inv_file = tmp_path / 'inventory'
inv_file.write_bytes(INVENTORY_V2)
inspect_main([str(inv_file)])
stdout, stderr = capsys.readouterr()
assert stdout.startswith('c:function\n')
assert stderr == ''
def test_inspect_main_url(capsys):
"""inspect_main interface, with url argument"""
class InventoryHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200, 'OK')
self.end_headers()
self.wfile.write(INVENTORY_V2)
def log_message(*args, **kwargs):
# Silenced.
pass
with http_server(InventoryHandler) as server:
url = f'http://localhost:{server.server_port}/{INVENTORY_FILENAME}'
inspect_main([url])
stdout, stderr = capsys.readouterr()
assert stdout.startswith('c:function\n')
assert stderr == ''
@pytest.mark.sphinx('html', testroot='ext-intersphinx-role', copy_test_root=True)
def test_intersphinx_role(app: SphinxTestApp) -> None:
inv_file = app.srcdir / 'inventory'
inv_file.write_bytes(INVENTORY_V2)
app.config.intersphinx_mapping = {
'inv': ('https://example.org/', str(inv_file)),
}
app.config.intersphinx_cache_limit = 0
app.config.nitpicky = True
# load the inventory and check if it's done correctly
validate_intersphinx_mapping(app, app.config)
load_mappings(app)
app.build()
content = (app.outdir / 'index.html').read_text(encoding='utf8')
warnings = strip_escape_sequences(app.warning.getvalue()).splitlines()
index_path = app.srcdir / 'index.rst'
assert warnings == [
f"{index_path}:21: WARNING: role for external cross-reference not found in domain 'py': 'nope' [intersphinx.external]",
f"{index_path}:28: WARNING: role for external cross-reference not found in domains 'cpp', 'std': 'nope' [intersphinx.external]",
f"{index_path}:39: WARNING: inventory for external cross-reference not found: 'invNope' [intersphinx.external]",
f"{index_path}:44: WARNING: role for external cross-reference not found in domain 'c': 'function' (perhaps you meant one of: 'func', 'identifier', 'type') [intersphinx.external]",
f"{index_path}:45: WARNING: role for external cross-reference not found in domains 'cpp', 'std': 'function' (perhaps you meant one of: 'cpp:func', 'cpp:identifier', 'cpp:type') [intersphinx.external]",
f'{index_path}:9: WARNING: external py:mod reference target not found: module3 [ref.mod]',
f'{index_path}:14: WARNING: external py:mod reference target not found: module10 [ref.mod]',
f'{index_path}:19: WARNING: external py:meth reference target not found: inv:Foo.bar [ref.meth]',
]
html = '<a class="reference external" href="https://example.org/{}" title="(in foo v2.0)">'
assert html.format('foo.html#module-module1') in content
assert html.format('foo.html#module-module2') in content
assert html.format('sub/foo.html#module1.func') in content
# default domain
assert html.format('index.html#std_uint8_t') in content
# std roles without domain prefix
assert html.format('docname.html') in content
assert html.format('index.html#cmdoption-ls-l') in content
# explicit inventory
assert html.format('cfunc.html#CFunc') in content
# explicit title
assert html.format('index.html#foons') in content
@pytest.mark.sphinx('html', testroot='root')
@pytest.mark.parametrize(
('cache_limit', 'expected_expired'),
[
(5, False), # cache for 5 days
(1, True), # cache for 1 day
(0, True), # cache for 0 days
(-1, False), # cache forever
],
)
def test_intersphinx_cache_limit(app, monkeypatch, cache_limit, expected_expired):
url = 'https://example.org/'
app.config.intersphinx_cache_limit = cache_limit
app.config.intersphinx_mapping = {
'inv': (url, None),
}
app.config.intersphinx_timeout = None
# load the inventory and check if it's done correctly
intersphinx_cache: dict[str, InventoryCacheEntry] = {
url: ('inv', 0, {}), # Timestamp of last cache write is zero.
}
validate_intersphinx_mapping(app, app.config)
# The test's `now` is two days after the cache was created.
now = 2 * 86400
monkeypatch.setattr('time.time', lambda: now)
# `_fetch_inventory_group` calls `_fetch_inventory_data`.
# We replace it with a mock to test whether it has been called.
# If it has been called, it means the cache had expired.
monkeypatch.setattr(
'sphinx.ext.intersphinx._load._fetch_inventory_data',
mock.Mock(return_value=(b'', '')),
)
mock_fetch_inventory = mock.Mock(return_value=_Inventory({}))
monkeypatch.setattr(
'sphinx.ext.intersphinx._load._load_inventory', mock_fetch_inventory
)
for name, (uri, locations) in app.config.intersphinx_mapping.values():
project = _IntersphinxProject(name=name, target_uri=uri, locations=locations)
updated = _fetch_inventory_group(
project=project,
cache=intersphinx_cache,
now=now,
config=_InvConfig.from_config(app.config),
srcdir=app.srcdir,
cache_dir=None,
)
# If we hadn't mocked `_fetch_inventory_data`, it would've made
# a request to `https://example.org/` and found no inventory
# file. That would've been an error, and `updated` would've been
# False even if the cache had expired. The mock makes it behave
# "correctly".
assert updated is expected_expired
# Double-check: If the cache was expired, `mock_fetch_inventory`
# must've been called.
assert mock_fetch_inventory.called is expected_expired
def test_intersphinx_fetch_inventory_group_url():
class InventoryHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200, 'OK')
self.end_headers()
self.wfile.write(INVENTORY_V2)
def log_message(*args, **kwargs):
# Silenced.
pass
with http_server(InventoryHandler) as server:
url1 = f'http://localhost:{server.server_port}'
url2 = f'http://localhost:{server.server_port}/'
config = Config()
config.intersphinx_cache_limit = -1
config.intersphinx_mapping = {
'1': (url1, None),
'2': (url2, None),
}
now = int(time.time())
# we can use 'srcdir=None' since we are raising in _fetch_inventory_data
kwds = {
'cache': {},
'now': now,
'config': config,
'srcdir': None,
'cache_dir': None,
}
# We need an exception with its 'args' attribute set (see error
# handling in sphinx.ext.intersphinx._load._fetch_inventory_group).
side_effect = ValueError('')
project1 = _IntersphinxProject(
name='1', target_uri=url1, locations=(url1, None)
)
with mock.patch(
'sphinx.ext.intersphinx._load._fetch_inventory_data',
side_effect=side_effect,
) as mockfn:
assert not _fetch_inventory_group(project=project1, **kwds)
mockfn.assert_any_call(
target_uri=url1,
inv_location=url1,
config=config,
srcdir=None,
cache_path=None,
)
mockfn.assert_any_call(
target_uri=url1,
inv_location=url1 + '/' + INVENTORY_FILENAME,
config=config,
srcdir=None,
cache_path=None,
)
project2 = _IntersphinxProject(
name='2', target_uri=url2, locations=(url2, None)
)
with mock.patch(
'sphinx.ext.intersphinx._load._fetch_inventory_data',
side_effect=side_effect,
) as mockfn:
assert not _fetch_inventory_group(project=project2, **kwds)
mockfn.assert_any_call(
target_uri=url2,
inv_location=url2,
config=config,
srcdir=None,
cache_path=None,
)
mockfn.assert_any_call(
target_uri=url2,
inv_location=url2 + INVENTORY_FILENAME,
config=config,
srcdir=None,
cache_path=None,
)
@pytest.mark.sphinx('html', testroot='root')
def test_inventory_text_version(tmp_path, app):
inv_file = tmp_path / 'inventory'
inv_file.write_bytes(INVENTORY_V2_TEXT_VERSION)
set_config(
app,
{
'python': ('https://docs.python.org/', str(inv_file)),
},
)
# load the inventory and check if non-numeric version is handled correctly
validate_intersphinx_mapping(app, app.config)
load_mappings(app)
rn = reference_check(app, 'py', 'mod', 'module1', 'foo')
assert isinstance(rn, nodes.reference)
assert rn['refuri'] == 'https://docs.python.org/foo.html#module-module1'
assert rn['reftitle'] == '(in foo stable)'
assert rn[0].astext() == 'Long Module desc'
| TestStripBasicAuth |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 16388,
"end": 16520
} | class ____(
BaseGroupingComponent[HostnameGroupingComponent | SaltGroupingComponent]
):
id: str = "hpkp"
| HPKPGroupingComponent |
python | apache__airflow | airflow-core/tests/unit/models/test_xcom_arg.py | {
"start": 1708,
"end": 5280
} | class ____:
def test_xcom_ctor(self, dag_maker):
python_op = build_python_op(dag_maker)
actual = XComArg(python_op, "test_key")
assert actual
assert actual.operator == python_op
assert actual.key == "test_key"
# Asserting the overridden __eq__ method
assert actual == XComArg(python_op, "test_key")
expected_str = (
"{{ task_instance.xcom_pull(task_ids='test_xcom_op', dag_id='test_xcom_dag', key='test_key') }}"
)
assert str(actual) == expected_str
assert (
f"echo {actual}" == "echo {{ task_instance.xcom_pull(task_ids='test_xcom_op', "
"dag_id='test_xcom_dag', key='test_key') }}"
)
def test_xcom_key_is_empty_str(self, dag_maker):
python_op = build_python_op(dag_maker)
actual = XComArg(python_op, key="")
assert actual.key == ""
assert (
str(actual) == "{{ task_instance.xcom_pull(task_ids='test_xcom_op', "
"dag_id='test_xcom_dag', key='') }}"
)
def test_set_downstream(self, dag_maker):
with dag_maker("test_set_downstream"):
op_a = BashOperator(task_id="a", bash_command="echo a")
op_b = BashOperator(task_id="b", bash_command="echo b")
bash_op1 = BashOperator(task_id="c", bash_command="echo c")
bash_op2 = BashOperator(task_id="d", bash_command="echo c")
xcom_args_a = XComArg(op_a)
xcom_args_b = XComArg(op_b)
bash_op1 >> xcom_args_a >> xcom_args_b >> bash_op2
dag_maker.create_dagrun()
assert op_a in bash_op1.downstream_list
assert op_b in op_a.downstream_list
assert bash_op2 in op_b.downstream_list
def test_set_upstream(self, dag_maker):
with dag_maker("test_set_upstream"):
op_a = BashOperator(task_id="a", bash_command="echo a")
op_b = BashOperator(task_id="b", bash_command="echo b")
bash_op1 = BashOperator(task_id="c", bash_command="echo c")
bash_op2 = BashOperator(task_id="d", bash_command="echo c")
xcom_args_a = XComArg(op_a)
xcom_args_b = XComArg(op_b)
bash_op1 << xcom_args_a << xcom_args_b << bash_op2
dag_maker.create_dagrun()
assert op_a in bash_op1.upstream_list
assert op_b in op_a.upstream_list
assert bash_op2 in op_b.upstream_list
def test_xcom_arg_property_of_base_operator(self, dag_maker):
with dag_maker("test_xcom_arg_property_of_base_operator"):
op_a = BashOperator(task_id="a", bash_command="echo a")
dag_maker.create_dagrun()
assert op_a.output == XComArg(op_a)
def test_xcom_key_getitem_not_str(self, dag_maker):
python_op = build_python_op(dag_maker)
actual = XComArg(python_op)
with pytest.raises(ValueError, match="XComArg only supports str lookup, received int"):
actual[1]
def test_xcom_key_getitem(self, dag_maker):
python_op = build_python_op(dag_maker)
actual = XComArg(python_op, key="another_key")
assert actual.key == "another_key"
actual_new_key = actual["another_key_2"]
assert actual_new_key.key == "another_key_2"
def test_xcom_not_iterable(self, dag_maker):
python_op = build_python_op(dag_maker)
actual = XComArg(python_op)
with pytest.raises(TypeError) as ctx:
list(actual)
assert str(ctx.value) == "'XComArg' object is not iterable"
@pytest.mark.system
| TestXComArgBuild |
python | weaviate__weaviate-python-client | weaviate/collections/classes/generative.py | {
"start": 7772,
"end": 8714
} | class ____(_GenerativeConfigRuntime):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.FRIENDLIAI, frozen=True, exclude=True
)
base_url: Optional[AnyHttpUrl]
max_tokens: Optional[int]
model: Optional[str]
n: Optional[int]
temperature: Optional[float]
top_p: Optional[float]
def _to_grpc(self, opts: _GenerativeConfigRuntimeOptions) -> generative_pb2.GenerativeProvider:
self._validate_multi_modal(opts)
return generative_pb2.GenerativeProvider(
return_metadata=opts.return_metadata,
friendliai=generative_pb2.GenerativeFriendliAI(
base_url=_parse_anyhttpurl(self.base_url),
max_tokens=self.max_tokens,
model=self.model,
n=self.n,
temperature=self.temperature,
top_p=self.top_p,
),
)
| _GenerativeFriendliai |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit1.py | {
"start": 865,
"end": 1036
} | class ____(Protocol1, Protocol3):
cm1 = 3
cm10 = 3
def __init__(self):
self.im1 = 3
self.im10 = 10
self.cm11 = 3
Concrete4()
| Concrete4 |
python | urllib3__urllib3 | src/urllib3/_collections.py | {
"start": 1858,
"end": 5264
} | class ____(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called. Callback which will get called
"""
_container: typing.OrderedDict[_KT, _VT]
_maxsize: int
dispose_func: typing.Callable[[_VT], None] | None
lock: RLock
def __init__(
self,
maxsize: int = 10,
dispose_func: typing.Callable[[_VT], None] | None = None,
) -> None:
super().__init__()
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = OrderedDict()
self.lock = RLock()
def __getitem__(self, key: _KT) -> _VT:
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key: _KT, value: _VT) -> None:
evicted_item = None
with self.lock:
# Possibly evict the existing value of 'key'
try:
# If the key exists, we'll overwrite it, which won't change the
# size of the pool. Because accessing a key should move it to
# the end of the eviction line, we pop it out first.
evicted_item = key, self._container.pop(key)
self._container[key] = value
except KeyError:
# When the key does not exist, we insert the value first so that
# evicting works in all cases, including when self._maxsize is 0
self._container[key] = value
if len(self._container) > self._maxsize:
# If we didn't evict an existing value, and we've hit our maximum
# size, then we have to evict the least recently used item from
# the beginning of the container.
evicted_item = self._container.popitem(last=False)
# After releasing the lock on the pool, dispose of any evicted value.
if evicted_item is not None and self.dispose_func:
_, evicted_value = evicted_item
self.dispose_func(evicted_value)
def __delitem__(self, key: _KT) -> None:
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self) -> int:
with self.lock:
return len(self._container)
def __iter__(self) -> typing.NoReturn:
raise NotImplementedError(
"Iteration over this class is unlikely to be threadsafe."
)
def clear(self) -> None:
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self) -> set[_KT]: # type: ignore[override]
with self.lock:
return set(self._container.keys())
| RecentlyUsedContainer |
python | Textualize__textual | src/textual/drivers/win32.py | {
"start": 2790,
"end": 3149
} | class ____(Union):
"""https://docs.microsoft.com/en-us/windows/console/input-record-str"""
_fields_ = [
("KeyEvent", KEY_EVENT_RECORD),
("MouseEvent", MOUSE_EVENT_RECORD),
("WindowBufferSizeEvent", WINDOW_BUFFER_SIZE_RECORD),
("MenuEvent", MENU_EVENT_RECORD),
("FocusEvent", FOCUS_EVENT_RECORD),
]
| InputEvent |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/triggers/message_bus.py | {
"start": 3496,
"end": 5774
} | class ____(BaseAzureServiceBusTrigger):
"""
Trigger for Azure Service Bus Queue message processing.
This trigger monitors one or more Azure Service Bus queues for incoming messages.
When messages arrive, they are processed and yielded as trigger events that can
be consumed by downstream tasks.
Example:
>>> trigger = AzureServiceBusQueueTrigger(
... queues=["queue1", "queue2"],
... azure_service_bus_conn_id="my_asb_conn",
... poll_interval=30,
... )
:param queues: List of queue names to monitor
:param poll_interval: Time interval between polling operations (seconds)
:param azure_service_bus_conn_id: Connection ID for Azure Service Bus
:param max_wait_time: Maximum time to wait for messages (seconds)
"""
def __init__(
self,
queues: list[str],
poll_interval: float | None = None,
azure_service_bus_conn_id: str | None = None,
max_wait_time: float | None = None,
) -> None:
super().__init__(poll_interval, azure_service_bus_conn_id, max_wait_time)
self.queues = queues
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
self.__class__.__module__ + "." + self.__class__.__qualname__,
{
"azure_service_bus_conn_id": self.connection_id,
"queues": self.queues,
"poll_interval": self.poll_interval,
"max_wait_time": self.max_wait_time,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
read_queue_message_async = sync_to_async(self.message_hook.read_message)
while True:
for queue_name in self.queues:
message = await read_queue_message_async(
queue_name=queue_name, max_wait_time=self.max_wait_time
)
if message:
yield TriggerEvent(
{
"message": BaseAzureServiceBusTrigger._get_message_body(message),
"queue": queue_name,
}
)
break
await asyncio.sleep(self.poll_interval)
| AzureServiceBusQueueTrigger |
python | astropy__astropy | astropy/samp/tests/web_profile_test_helpers.py | {
"start": 2761,
"end": 7691
} | class ____(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result["samp.methodName"] == "receiveNotification":
self.receive_notification(
self._private_key, *result["samp.params"]
)
elif result["samp.methodName"] == "receiveCall":
self.receive_call(self._private_key, *result["samp.params"])
elif result["samp.methodName"] == "receiveResponse":
self.receive_response(self._private_key, *result["samp.params"])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registration failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registration failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
# pushCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
| SAMPWebClient |
python | kamyu104__LeetCode-Solutions | Python/max-stack.py | {
"start": 286,
"end": 1826
} | class ____(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.__idx_to_val = collections.defaultdict(int)
self.__val_to_idxs = collections.defaultdict(list)
self.__top = None
self.__max = None
def push(self, x):
"""
:type x: int
:rtype: void
"""
idx = self.__val_to_idxs[self.__top][-1]+1 if self.__val_to_idxs else 0
self.__idx_to_val[idx] = x
self.__val_to_idxs[x].append(idx)
self.__top = x
self.__max = max(self.__max, x)
def pop(self):
"""
:rtype: int
"""
val = self.__top
self.__remove(val)
return val
def top(self):
"""
:rtype: int
"""
return self.__top
def peekMax(self):
"""
:rtype: int
"""
return self.__max
def popMax(self):
"""
:rtype: int
"""
val = self.__max
self.__remove(val)
return val
def __remove(self, val):
idx = self.__val_to_idxs[val][-1]
self.__val_to_idxs[val].pop()
if not self.__val_to_idxs[val]:
del self.__val_to_idxs[val]
del self.__idx_to_val[idx]
if val == self.__top:
self.__top = self.__idx_to_val[max(self.__idx_to_val.keys())] if self.__idx_to_val else None
if val == self.__max:
self.__max = max(self.__val_to_idxs.keys()) if self.__val_to_idxs else None
| MaxStack |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py | {
"start": 20484,
"end": 23728
} | class ____(GKEOperatorMixin, KubernetesInstallKueueOperator):
"""
Installs Kueue of specific version inside Cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEStartKueueInsideClusterOperator`
.. seealso::
For more details about Kueue have a look at the reference:
https://kueue.sigs.k8s.io/docs/overview/
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster.
:param use_internal_ip: Use the internal IP address as the endpoint.
:param use_dns_endpoint: Use the DNS address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
enable_tcp_keepalive = True
template_fields = tuple(
set(GKEOperatorMixin.template_fields) | set(KubernetesInstallKueueOperator.template_fields)
)
operator_extra_links = (KubernetesEngineClusterLink(),)
def __init__(
self,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
use_dns_endpoint: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
self.use_dns_endpoint = use_dns_endpoint
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
"location": self.location,
}
def execute(self, context: Context):
cluster = self.cluster_hook.get_cluster(name=self.cluster_name, project_id=self.project_id)
KubernetesEngineClusterLink.persist(context=context, cluster=cluster)
if self.cluster_hook.check_cluster_autoscaling_ability(cluster=cluster):
super().execute(context)
else:
self.log.info(
"Cluster doesn't have ability to autoscale, will not install Kueue inside. Aborting"
)
| GKEStartKueueInsideClusterOperator |
python | huggingface__transformers | tests/models/upernet/test_modeling_upernet.py | {
"start": 1443,
"end": 4601
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
num_channels=3,
num_stages=4,
hidden_sizes=[10, 20, 30, 40],
depths=[1, 1, 1, 1],
is_training=True,
use_labels=True,
intermediate_size=37,
hidden_act="gelu",
type_sequence_label_size=10,
initializer_range=0.02,
out_features=["stage2", "stage3", "stage4"],
num_labels=3,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_stages = num_stages
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.out_features = out_features
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_backbone_config(self):
return ConvNextConfig(
num_channels=self.num_channels,
num_stages=self.num_stages,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
is_training=self.is_training,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
out_features=self.out_features,
)
def get_config(self):
return UperNetConfig(
backbone_config=self.get_backbone_config(),
backbone=None,
hidden_size=64,
pool_scales=[1, 2, 3, 6],
use_auxiliary_head=True,
auxiliary_loss_weight=0.4,
auxiliary_in_channels=40,
auxiliary_channels=32,
auxiliary_num_convs=1,
auxiliary_concat_input=False,
loss_ignore_index=255,
num_labels=self.num_labels,
)
def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
model = UperNetForSemanticSegmentation(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| UperNetModelTester |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/validators/detector_workflow.py | {
"start": 8486,
"end": 10145
} | class ____(CamelSnakeSerializer):
"""
Connect/disconnect multiple workflows to a single detector all at once.
"""
detector_id = serializers.IntegerField(required=True)
workflow_ids = serializers.ListField(child=serializers.IntegerField(), required=True)
def create(self, validated_data):
validate_workflows_exist(validated_data["workflow_ids"], self.context["organization"])
validate_detectors_exist_and_have_permissions(
[validated_data["detector_id"]], self.context["organization"], self.context["request"]
)
existing_detector_workflows = list(
DetectorWorkflow.objects.filter(
detector_id=validated_data["detector_id"],
)
)
new_workflow_ids = set(validated_data["workflow_ids"]) - {
dw.workflow_id for dw in existing_detector_workflows
}
detector_workflows_to_add: list[dict[Literal["detector_id", "workflow_id"], int]] = [
{"detector_id": validated_data["detector_id"], "workflow_id": workflow_id}
for workflow_id in new_workflow_ids
]
detector_workflows_to_remove = [
dw
for dw in existing_detector_workflows
if dw.workflow_id not in validated_data["workflow_ids"]
]
perform_bulk_detector_workflow_operations(
detector_workflows_to_add,
detector_workflows_to_remove,
self.context["request"],
self.context["organization"],
)
return list(DetectorWorkflow.objects.filter(detector_id=validated_data["detector_id"]))
| BulkDetectorWorkflowsValidator |
python | walkccc__LeetCode | solutions/289. Game of Life/289.py | {
"start": 0,
"end": 773
} | class ____:
def gameOfLife(self, board: list[list[int]]) -> None:
m = len(board)
n = len(board[0])
for i in range(m):
for j in range(n):
ones = 0
for x in range(max(0, i - 1), min(m, i + 2)):
for y in range(max(0, j - 1), min(n, j + 2)):
ones += board[x][y] & 1
# Any live cell with two or three live neighbors lives on to the next
# generation.
if board[i][j] == 1 and (ones == 3 or ones == 4):
board[i][j] |= 0b10
# Any dead cell with exactly three live neighbors becomes a live cell,
# as if by reproduction.
if board[i][j] == 0 and ones == 3:
board[i][j] |= 0b10
for i in range(m):
for j in range(n):
board[i][j] >>= 1
| Solution |
python | huggingface__transformers | src/transformers/models/convnext/modeling_convnext.py | {
"start": 8227,
"end": 9574
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.stages = nn.ModuleList()
drop_path_rates = [
x.tolist()
for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").split(config.depths)
]
prev_chs = config.hidden_sizes[0]
for i in range(config.num_stages):
out_chs = config.hidden_sizes[i]
stage = ConvNextStage(
config,
in_channels=prev_chs,
out_channels=out_chs,
stride=2 if i > 0 else 1,
depth=config.depths[i],
drop_path_rates=drop_path_rates[i],
)
self.stages.append(stage)
prev_chs = out_chs
def forward(
self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = False
) -> BaseModelOutputWithNoAttention:
all_hidden_states = [hidden_states] if output_hidden_states else None
for layer_module in self.stages:
hidden_states = layer_module(hidden_states)
if all_hidden_states is not None:
all_hidden_states.append(hidden_states)
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
@auto_docstring
| ConvNextEncoder |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/configuration_phi4_multimodal.py | {
"start": 1327,
"end": 5238
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Phi4MultimodalVisionModel`]. It is used to instantiate a
Phi4Multimodal vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of
[microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 4304):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 27):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 448):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
crop_size (`int`, *optional*, defaults to 448):
Crop size for the input images.
image_token_id (`int`, *optional*, defaults to 200010):
The image token id.
feature_layer (`int`, *optional*, defaults to -2):
The index of the layer of the encoder from which to extract image features.
Example:
```python
>>> from transformers import Phi4MultimodalVisionConfig
>>> # Initializing a Phi4MultimodalVisionConfig with microsoft/Phi-4-multimodal-instruct style configuration
>>> configuration = Phi4MultimodalVisionConfig()
```"""
model_type = "phi4_multimodal_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1152,
intermediate_size=4304,
num_hidden_layers=27,
num_attention_heads=16,
num_channels=3,
image_size=448,
patch_size=14,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
crop_size: int = 448,
image_token_id: int = 200010,
feature_layer: int = -2,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.crop_size = crop_size
self.image_token_id = image_token_id
self.feature_layer = feature_layer
| Phi4MultimodalVisionConfig |
python | huggingface__transformers | src/transformers/image_processing_base.py | {
"start": 1398,
"end": 2027
} | class ____(BaseBatchFeature):
r"""
Holds the output of the image processor specific `__call__` methods.
This class is derived from a python dictionary and can be used as a dictionary.
Args:
data (`dict`):
Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
tensor_type (`Union[None, str, TensorType]`, *optional*):
You can give a tensor_type here to convert the lists of integers in PyTorch/Numpy Tensors at
initialization.
"""
# TODO: (Amy) - factor out the common parts of this and the feature extractor
| BatchFeature |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dependency.py | {
"start": 625,
"end": 12207
} | class ____:
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.sort_key = "%s_%s" % (self.parent._sort_key, prop.key)
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" % self.prop
)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork._ProcessAll(uow, self, False, True)
before_delete = unitofwork._ProcessAll(uow, self, True, True)
parent_saves = unitofwork._SaveUpdateAll(
uow, self.parent.primary_base_mapper
)
child_saves = unitofwork._SaveUpdateAll(
uow, self.mapper.primary_base_mapper
)
parent_deletes = unitofwork._DeleteAll(
uow, self.parent.primary_base_mapper
)
child_deletes = unitofwork._DeleteAll(
uow, self.mapper.primary_base_mapper
)
self.per_property_dependencies(
uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork._SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork._DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork._ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork._ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [(child_saves, False), (child_deletes, True)]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork._SaveUpdateAll(
uow, self.parent.base_mapper
)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork._DeleteAll(
uow, self.parent.base_mapper
)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
# now create actions /dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. In the
# case of deletes we may try to load missing items here as well.
sum_ = state.manager[self.key].impl.get_all_pending(
state,
state.dict,
(
self._passive_delete_flag
if isdelete
else attributes.PASSIVE_NO_INITIALIZE
),
)
if not sum_:
continue
if isdelete:
before_delete = unitofwork._ProcessState(
uow, self, True, state
)
if parent_in_cycles:
parent_deletes = unitofwork._DeleteState(uow, state)
else:
after_save = unitofwork._ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork._SaveUpdateState(uow, state)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork._DeleteState(uow, child_state),
True,
)
else:
child_action = (
unitofwork._SaveUpdateState(uow, child_state),
False,
)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(
uow,
parent_saves,
parent_deletes,
child_action,
after_save,
before_delete,
isdelete,
childisdelete,
)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = (
attributes.PASSIVE_NO_INITIALIZE
| attributes.INCLUDE_PENDING_MUTATIONS
)
elif self.direction is MANYTOONE:
# here, we were hoping to optimize having to fetch many-to-one
# for history and ignore it, if there's no further cascades
# to take place. however there are too many less common conditions
# that still take place and tests in test_relationships /
# test_cascade etc. will still fail.
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = (
attributes.PASSIVE_OFF | attributes.INCLUDE_PENDING_MUTATIONS
)
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(s, self.key, passive)
if history and not history.empty():
return True
else:
return (
states
and not self.prop._is_self_referential
and self.mapper in uowcommit.mappers
)
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop,)
)
elif state is not None and not self.mapper._canload(
state, allow_subtypes=not self.enable_typechecks
):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type. If %(x)s is a subclass of "
'%(z)s, configure mapper "%(zm)s" to '
"load this subtype polymorphically, or "
"set enable_typechecks=False to allow "
"any subtype to be accepted for flush. "
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
"zm": self.mapper,
}
)
else:
raise exc.FlushError(
"Attempting to flush an item of type "
"%(x)s as a member of collection "
'"%(y)s". Expected an object of type '
"%(z)s or a polymorphic subclass of "
"this type."
% {
"x": state.class_,
"y": self.prop,
"z": self.mapper.class_,
}
)
def _synchronize(self, state, child, associationrow, clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(
sorted([self.key] + [p.key for p in self.prop._reverse_property])
)
return uow.memo(("reverse_key", process_key), set)
def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
if not is_m2o_delete or x is not None:
uowcommit.register_post_update(
state, [r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
| _DependencyProcessor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property5.py | {
"start": 155,
"end": 357
} | class ____(Generic[T]):
def __init__(self, bar: T):
self._bar = bar
@property
def bar(self) -> T:
return self._bar
def bar_method(self) -> T:
return self._bar
| Foo |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/tutorials/dagster_tutorial/src/dagster_tutorial/components/tutorial.py | {
"start": 185,
"end": 1140
} | class ____(dg.Component, dg.Model, dg.Resolvable):
# The interface for the component
duckdb_database: str
etl_steps: list[ETL]
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
_etl_assets = []
for etl in self.etl_steps:
@dg.asset(
name=etl.table,
)
def _table(duckdb: DuckDBResource):
with duckdb.get_connection() as conn:
conn.execute(
f"""
create or replace table {etl.table} as (
select * from read_csv_auto('{etl.url_path}')
)
"""
)
_etl_assets.append(_table)
return dg.Definitions(
assets=_etl_assets,
resources={"duckdb": DuckDBResource(database=self.duckdb_database)},
)
# end_tutorial_component
| Tutorial |
python | pennersr__django-allauth | allauth/socialaccount/providers/auth0/provider.py | {
"start": 217,
"end": 340
} | class ____(ProviderAccount):
def get_avatar_url(self):
return self.account.extra_data.get("picture")
| Auth0Account |
python | PyCQA__pydocstyle | src/tests/test_cases/sections.py | {
"start": 10152,
"end": 12420
} | class ____: # noqa: D203
"""Test class."""
def test_method(self, test, another_test, z, _, x=1, y=2, _private_arg=1): # noqa: D213, D407
"""Test a valid args section.
Some long string with a \
line continuation.
Parameters
----------
test, another_test
Some parameters without type.
z : some parameter with a very long type description that requires a \
line continuation.
But no further description.
x, y : int
Some integer parameters.
"""
@expect("D417: Missing argument descriptions in the docstring "
"(argument(s) test, y, z are missing descriptions in "
"'test_missing_args' docstring)", arg_count=5)
def test_missing_args(self, test, x, y, z=3, t=1, _private=0): # noqa: D213, D407
"""Test a valid args section.
Parameters
----------
x, t : int
Some parameters.
"""
@classmethod
@expect("D417: Missing argument descriptions in the docstring "
"(argument(s) test, y, z are missing descriptions in "
"'test_missing_args_class_method' docstring)", arg_count=4)
def test_missing_args_class_method(cls, test, x, y, z=3): # noqa: D213, D407
"""Test a valid args section.
Parameters
----------
z
x
Another parameter. The parameters y, test below are
missing descriptions. The parameter z above is also missing
a description.
y
test
"""
@staticmethod
@expect("D417: Missing argument descriptions in the docstring "
"(argument(s) a, z are missing descriptions in "
"'test_missing_args_static_method' docstring)", arg_count=3)
def test_missing_args_static_method(a, x, y, z=3, t=1): # noqa: D213, D407
"""Test a valid args section.
Parameters
----------
x, y
Another parameter.
t : int
Yet another parameter.
"""
@staticmethod
def test_mixing_numpy_and_google(danger): # noqa: D213
"""Repro for #388.
Parameters
----------
danger
Zoneeeeee!
"""
| TestNumpy |
python | pyca__cryptography | tests/x509/test_x509.py | {
"start": 1720,
"end": 1807
} | class ____(x509.ExtensionType):
oid = x509.ObjectIdentifier("1.2.3.4")
| DummyExtension |
python | pennersr__django-allauth | tests/projects/headless_only/settings.py | {
"start": 2393,
"end": 4332
} | class ____(PBKDF2PasswordHasher):
"""
A subclass of PBKDF2PasswordHasher that uses 1 iteration.
This is for test purposes only. Never use anywhere else.
"""
iterations = 1
PASSWORD_HASHERS = [
"tests.projects.headless_only.settings.MyPBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
"openid_connect": {
"APPS": [
{
"provider_id": "unittest-server",
"name": "Unittest Server",
"client_id": "Unittest client_id",
"client_secret": "Unittest client_secret",
"settings": {
"server_url": "https://unittest.example.com",
},
},
{
"provider_id": "other-server",
"name": "Other Example Server",
"client_id": "other client_id",
"client_secret": "other client_secret",
"settings": {
"server_url": "https://other.example.com",
},
},
],
}
}
ACCOUNT_LOGIN_BY_CODE_ENABLED = True
ACCOUNT_ADAPTER = "tests.projects.common.adapters.AccountAdapter"
HEADLESS_ONLY = True
HEADLESS_FRONTEND_URLS = {
"account_confirm_email": "/spa/confirm-email?key={key}",
"account_reset_password": "/spa/password/reset/",
"account_reset_password_from_key": "/spa/password/reset/{key}/",
"account_signup": "/spa/signup",
"socialaccount_login_error": "/spa/error",
}
HEADLESS_SERVE_SPECIFICATION = True
MFA_SUPPORTED_TYPES = ["totp", "webauthn", "recovery_codes"]
MFA_PASSKEY_LOGIN_ENABLED = True
MFA_PASSKEY_SIGNUP_ENABLED = True
| MyPBKDF2PasswordHasher |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/parsing_ops.py | {
"start": 1265,
"end": 7281
} | class ____(dataset_ops.UnaryDataset):
"""A `Dataset` that parses `example` dataset into a `dict` dataset."""
def __init__(self, input_dataset, features, num_parallel_calls,
deterministic):
self._input_dataset = input_dataset
if not structure.are_compatible(
input_dataset.element_spec,
tensor_spec.TensorSpec([None], dtypes.string)):
raise TypeError("Input dataset should be a dataset of vectors of "
f"strings. Instead it is `{input_dataset.element_spec}`.")
self._num_parallel_calls = num_parallel_calls
if deterministic is None:
self._deterministic = "default"
elif deterministic:
self._deterministic = "true"
else:
self._deterministic = "false"
# pylint: disable=protected-access
self._features = parsing_ops._prepend_none_dimension(features)
params = parsing_ops._ParseOpParams.from_features(self._features, [
parsing_ops.VarLenFeature, parsing_ops.SparseFeature,
parsing_ops.FixedLenFeature, parsing_ops.FixedLenSequenceFeature,
parsing_ops.RaggedFeature
])
# pylint: enable=protected-access
self._sparse_keys = params.sparse_keys
self._sparse_types = params.sparse_types
self._ragged_keys = params.ragged_keys
self._ragged_value_types = params.ragged_value_types
self._ragged_split_types = params.ragged_split_types
self._dense_keys = params.dense_keys
self._dense_defaults = params.dense_defaults_vec
self._dense_shapes = params.dense_shapes_as_proto
self._dense_types = params.dense_types
input_dataset_shape = dataset_ops.get_legacy_output_shapes(
self._input_dataset)
self._element_spec = {}
for (key, value_type) in zip(params.sparse_keys, params.sparse_types):
self._element_spec[key] = sparse_tensor.SparseTensorSpec(
input_dataset_shape.concatenate([None]), value_type)
for (key, value_type, dense_shape) in zip(params.dense_keys,
params.dense_types,
params.dense_shapes):
self._element_spec[key] = tensor_spec.TensorSpec(
input_dataset_shape.concatenate(dense_shape), value_type)
for (key, value_type, splits_type) in zip(params.ragged_keys,
params.ragged_value_types,
params.ragged_split_types):
self._element_spec[key] = ragged_tensor.RaggedTensorSpec(
input_dataset_shape.concatenate([None]), value_type, 1, splits_type)
variant_tensor = (
gen_experimental_dataset_ops.parse_example_dataset_v2(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_parallel_calls,
self._dense_defaults,
self._sparse_keys,
self._dense_keys,
self._sparse_types,
self._dense_shapes,
deterministic=self._deterministic,
ragged_keys=self._ragged_keys,
ragged_value_types=self._ragged_value_types,
ragged_split_types=self._ragged_split_types,
**self._flat_structure))
super(_ParseExampleDataset, self).__init__(input_dataset, variant_tensor)
  @property
  def element_spec(self):
    """Dict mapping each feature key to its `tf.TypeSpec`.

    Built in `__init__`: `SparseTensorSpec` for sparse features,
    `TensorSpec` for dense features, and `RaggedTensorSpec` for
    ragged features.
    """
    return self._element_spec
@tf_export("data.experimental.parse_example_dataset")
@deprecation.deprecated(
    None, "Use `tf.data.Dataset.map(tf.io.parse_example(...))` instead.")
def parse_example_dataset(features, num_parallel_calls=1, deterministic=None):
  """A transformation that parses `Example` protos into a `dict` of tensors.

  Each element of the input dataset is treated as a batch of serialized
  `Example` protos. The transformation parses every proto into a dictionary
  mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor`
  objects, according to the feature configs in `features`: `VarLenFeature`
  and `SparseFeature` produce `SparseTensor`s, `RaggedFeature` produces
  `RaggedTensor`s, and `FixedLenFeature` produces `Tensor`s. See
  `tf.io.parse_example` for details on feature dictionaries.

  Args:
    features: A `dict` mapping feature keys to `FixedLenFeature`,
      `VarLenFeature`, `RaggedFeature`, and `SparseFeature` values.
    num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
      representing the number of parsing processes to call in parallel.
    deterministic: (Optional.) A boolean controlling whether determinism
      should be traded for performance by allowing elements to be produced
      out of order if some parsing calls complete faster than others. If
      `deterministic` is `None`, the `tf.data.Options.deterministic` dataset
      option (`True` by default) is used to decide whether to produce
      elements deterministically.

  Returns:
    A dataset transformation function, which can be passed to
    `tf.data.Dataset.apply`.

  Raises:
    ValueError: if features argument is None.
  """
  if features is None:
    raise ValueError("Argument `features` is required, but not specified.")

  def _apply_fn(dataset):
    """Applies the parse-example transformation to `dataset`."""
    parsed = _ParseExampleDataset(dataset, features, num_parallel_calls,
                                  deterministic)
    # SparseFeature / RaggedFeature configs need a post-parsing step that
    # assembles their component tensors into composite tensors.
    needs_composite_pass = any(
        isinstance(feature, (parsing_ops.SparseFeature,
                             parsing_ops.RaggedFeature))
        for feature in features.values())
    if needs_composite_pass:
      # pylint: disable=protected-access
      # pylint: disable=g-long-lambda
      parsed = parsed.map(
          lambda x: parsing_ops._construct_tensors_for_composite_features(
              features, x),
          num_parallel_calls=num_parallel_calls)
    return parsed

  return _apply_fn
| _ParseExampleDataset |
python | kamyu104__LeetCode-Solutions | Python/adjacent-increasing-subarrays-detection-i.py | {
"start": 37,
"end": 506
} | class ____(object):
def hasIncreasingSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
result = 0
curr, prev = 1, 0
for i in xrange(len(nums)-1):
if nums[i] < nums[i+1]:
curr += 1
else:
prev = curr
curr = 1
result = max(result, curr//2, min(prev, curr))
return result >= k
| Solution |
python | realpython__materials | python-isinstance/balls.py | {
"start": 106,
"end": 246
} | class ____(Ball):
def __init__(self, color, number):
super().__init__(color, shape="sphere")
self.number = number
| PoolBall |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.