language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | zarr-developers__zarr-python | tests/conftest.py | {
"start": 3741,
"end": 4705
} | class ____:
zarr_format: ZarrFormat
store: Literal["local", "fsspec", "memory", "zip"]
attributes: dict[str, Any] = field(default_factory=dict)
@pytest.fixture
async def async_group(request: pytest.FixtureRequest, tmpdir: LEGACY_PATH) -> AsyncGroup:
param: AsyncGroupRequest = request.param
store = await parse_store(param.store, str(tmpdir))
return await AsyncGroup.from_store(
store,
attributes=param.attributes,
zarr_format=param.zarr_format,
overwrite=False,
)
@pytest.fixture(params=["numpy", "cupy"])
def xp(request: pytest.FixtureRequest) -> Any:
"""Fixture to parametrize over numpy-like libraries"""
if request.param == "cupy":
request.node.add_marker(pytest.mark.gpu)
return pytest.importorskip(request.param)
@pytest.fixture(autouse=True)
def reset_config() -> Generator[None, None, None]:
config.reset()
yield
config.reset()
@dataclass
| AsyncGroupRequest |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 97570,
"end": 100212
} | class ____(TreeTestCase):
def test_no_index_set(self):
class SomeModel(MPTTModel):
class Meta:
app_label = "myapp"
tree_id_attr = SomeModel._mptt_meta.tree_id_attr
self.assertTrue(SomeModel._meta.get_field(tree_id_attr).db_index)
for key in ("right_attr", "left_attr", "level_attr"):
field_name = getattr(SomeModel._mptt_meta, key)
self.assertFalse(SomeModel._meta.get_field(field_name).db_index)
@staticmethod
def index_fields(model):
return (tuple(index.fields) for index in model._meta.indexes)
def test_indexes(self):
already_idx = [
[
Index(fields=["tree_id", "lft"], name="original_idx"),
Index(fields=["tree_id", "lft"], name="duplicate_idx"),
],
]
no_idx = [(), []]
some_idx = [[Index(fields=["tree_id"], name="test_some_idx")]]
for idx, case in enumerate(already_idx + no_idx + some_idx):
class Meta:
indexes = case
app_label = "myapp"
# Use type() here and in test_index_together_different_attr over
# an explicit class X(MPTTModel):, as this throws a warning that
# re-registering models with the same name (which is what an explicit
# class does) could cause errors. Kind of... weird, but surprisingly
# effective.
SomeModel = type(
str(f"model_{idx}"),
(MPTTModel,),
{
"Meta": Meta,
"__module__": __name__,
},
)
self.assertIn(("tree_id", "lft"), self.index_fields(SomeModel))
def test_index_together_different_attr(self):
already_idx = [
[
Index(fields=["abc", "def"], name="original_idx"),
Index(fields=("abc", "def"), name="duplicate_idx"),
]
]
no_idx = [(), []]
some_idx = [[Index(fields=["abc"], name="some_idx")]]
for idx, case in enumerate(already_idx + no_idx + some_idx):
class MPTTMeta:
tree_id_attr = "abc"
left_attr = "def"
class Meta:
indexes = case
app_label = "myapp"
SomeModel = type(
str(f"model__different_attr_{idx}"),
(MPTTModel,),
{"MPTTMeta": MPTTMeta, "Meta": Meta, "__module__": str(__name__)},
)
self.assertIn(("abc", "def"), self.index_fields(SomeModel))
| ModelMetaIndexes |
python | pandas-dev__pandas | pandas/io/formats/html.py | {
"start": 608,
"end": 22585
} | class ____:
"""
Internal class for formatting output data in html.
This class is intended for shared functionality between
DataFrame.to_html() and DataFrame._repr_html_().
Any logic in common with other output formatting methods
should ideally be inherited from classes in format.py
and this class responsible for only producing html markup.
"""
indent_delta: Final = 2
def __init__(
self,
formatter: DataFrameFormatter,
classes: str | list[str] | tuple[str, ...] | None = None,
border: int | bool | None = None,
table_id: str | None = None,
render_links: bool = False,
) -> None:
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements: list[str] = []
self.bold_rows = self.fmt.bold_rows
self.escape = self.fmt.escape
self.show_dimensions = self.fmt.show_dimensions
if border is None or border is True:
border = cast(int, get_option("display.html.border"))
elif not border:
border = None
self.border = border
self.table_id = table_id
self.render_links = render_links
self.col_space = {}
is_multi_index = isinstance(self.columns, MultiIndex)
for column, value in self.fmt.col_space.items():
col_space_value = f"{value}px" if isinstance(value, int) else value
self.col_space[column] = col_space_value
# GH 53885: Handling case where column is index
# Flatten the data in the multi index and add in the map
if is_multi_index and isinstance(column, tuple):
for column_index in column:
self.col_space[str(column_index)] = col_space_value
def to_string(self) -> str:
lines = self.render()
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
return "\n".join(lines)
def render(self) -> list[str]:
self._write_table()
if self.should_show_dimensions:
by = chr(215) # × # noqa: RUF003
self.write(
f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
)
return self.elements
@property
def should_show_dimensions(self) -> bool:
return self.fmt.should_show_dimensions
@property
def show_row_idx_names(self) -> bool:
return self.fmt.show_row_idx_names
@property
def show_col_idx_names(self) -> bool:
return self.fmt.show_col_idx_names
@property
def row_levels(self) -> int:
if self.fmt.index:
# showing (row) index
return self.frame.index.nlevels
elif self.show_col_idx_names:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# If the row index is not displayed a column of
# blank cells need to be included before the DataFrame values.
return 1
# not showing (row) index
return 0
def _get_columns_formatted_values(self) -> Iterable:
return self.columns
@property
def is_truncated(self) -> bool:
return self.fmt.is_truncated
@property
def ncols(self) -> int:
return len(self.fmt.tr_frame.columns)
def write(self, s: Any, indent: int = 0) -> None:
rs = pprint_thing(s)
self.elements.append(" " * indent + rs)
def write_th(
self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
) -> None:
"""
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : str, default None
Tags to include in the cell.
Returns
-------
A written <th> cell.
"""
col_space = self.col_space.get(s, None)
if header and col_space is not None:
tags = tags or ""
tags += f'style="min-width: {col_space};"'
self._write_cell(s, kind="th", indent=indent, tags=tags)
def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:
self._write_cell(s, kind="td", indent=indent, tags=tags)
def _write_cell(
self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None
) -> None:
if tags is not None:
start_tag = f"<{kind} {tags}>"
else:
start_tag = f"<{kind}>"
if self.escape:
# escape & first to prevent double escaping of &
esc = {"&": r"&", "<": r"<", ">": r">"}
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
# replace spaces betweens strings with non-breaking spaces
rs = rs.replace(" ", " ")
if self.render_links and is_url(rs):
rs_unescaped = pprint_thing(s, escape_chars={}).strip()
start_tag += f'<a href="{rs_unescaped}" target="_blank">'
end_a = "</a>"
else:
end_a = ""
self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)
def write_tr(
self,
line: Iterable,
indent: int = 0,
indent_delta: int = 0,
header: bool = False,
align: str | None = None,
tags: dict[int, str] | None = None,
nindex_levels: int = 0,
) -> None:
if tags is None:
tags = {}
if align is None:
self.write("<tr>", indent)
else:
self.write(f'<tr style="text-align: {align};">', indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent=indent, header=header, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write("</tr>", indent)
def _write_table(self, indent: int = 0) -> None:
_classes = ["dataframe"] # Default class.
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
_classes.append("tex2jax_ignore")
_classes.append("mathjax_ignore")
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
"classes must be a string, list, "
f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
if self.table_id is None:
id_section = ""
else:
id_section = f' id="{self.table_id}"'
if self.border is None:
border_attr = ""
else:
border_attr = f' border="{self.border}"'
self.write(
f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',
indent,
)
if self.fmt.header or self.show_row_idx_names:
self._write_header(indent + self.indent_delta)
self._write_body(indent + self.indent_delta)
self.write("</table>", indent)
def _write_col_header(self, indent: int) -> None:
row: list[Hashable]
is_truncated_horizontally = self.fmt.is_truncated_horizontally
if isinstance(self.columns, MultiIndex):
template = 'colspan="{span:d}" halign="left"'
sentinel: lib.NoDefault | bool
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
else:
sentinel = False
levels = self.columns._format_multi(sparsify=sentinel, include_names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(
zip(level_lengths, levels, strict=True)
):
if is_truncated_horizontally:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (
values[:ins_col] + ("...",) + values[ins_col:]
)
else:
# sparse col headers do not receive a ...
values = (
values[:ins_col]
+ (values[ins_col - 1],)
+ values[ins_col:]
)
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + ("...",) + values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + ["..."] + values[ins_col:]
# see gh-22579
# Column Offset Bug with to_html(index=False) with
# MultiIndex Columns and Index.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code
# block below for standard columns index.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class and create a
# _get_formatted_column_labels function for code
# parity with DataFrameFormatter class.
if self.fmt.show_index_names:
name = self.columns.names[lnum]
row.append(pprint_thing(name or ""))
else:
row.append("")
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
else:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code block
# above for columns MultiIndex.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class.
if self.fmt.show_index_names:
row.append(self.columns.name or "")
else:
row.append("")
row.extend(self._get_columns_formatted_values())
align = self.fmt.justify
if is_truncated_horizontally:
ins_col = self.row_levels + self.fmt.tr_col_num
row.insert(ins_col, "...")
self.write_tr(row, indent, self.indent_delta, header=True, align=align)
def _write_row_header(self, indent: int) -> None:
is_truncated_horizontally = self.fmt.is_truncated_horizontally
row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
self.ncols + (1 if is_truncated_horizontally else 0)
)
self.write_tr(row, indent, self.indent_delta, header=True)
def _write_header(self, indent: int) -> None:
self.write("<thead>", indent)
if self.fmt.header:
self._write_col_header(indent + self.indent_delta)
if self.show_row_idx_names:
self._write_row_header(indent + self.indent_delta)
self.write("</thead>", indent)
def _get_formatted_values(self) -> dict[int, list[str]]:
with option_context("display.max_colwidth", None):
fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
return fmt_values
def _write_body(self, indent: int) -> None:
self.write("<tbody>", indent)
fmt_values = self._get_formatted_values()
# write values
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
else:
self._write_regular_rows(fmt_values, indent + self.indent_delta)
self.write("</tbody>", indent)
def _write_regular_rows(
self, fmt_values: Mapping[int, list[str]], indent: int
) -> None:
is_truncated_horizontally = self.fmt.is_truncated_horizontally
is_truncated_vertically = self.fmt.is_truncated_vertically
nrows = len(self.fmt.tr_frame)
if self.fmt.index:
fmt = self.fmt._get_formatter("__index__")
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
# only reached with non-Multi index
index_values = self.fmt.tr_frame.index._format_flat(include_name=False)
row: list[str] = []
for i in range(nrows):
if is_truncated_vertically and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
row = []
if self.fmt.index:
row.append(index_values[i])
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Add blank cell before data cells.
elif self.show_col_idx_names:
row.append("")
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
dot_col_ix = self.fmt.tr_col_num + self.row_levels
row.insert(dot_col_ix, "...")
self.write_tr(
row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
)
def _write_hierarchical_rows(
self, fmt_values: Mapping[int, list[str]], indent: int
) -> None:
template = 'rowspan="{span}" valign="top"'
is_truncated_horizontally = self.fmt.is_truncated_horizontally
is_truncated_vertically = self.fmt.is_truncated_vertically
frame = self.fmt.tr_frame
nrows = len(frame)
assert isinstance(frame.index, MultiIndex)
idx_values = frame.index._format_multi(sparsify=False, include_names=False)
idx_values = list(zip(*idx_values, strict=True))
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
levels = frame.index._format_multi(sparsify=sentinel, include_names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if is_truncated_vertically:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = "..."
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(
ins_row, tuple(["..."] * len(level_lengths))
)
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in fmt_values:
fmt_values[ix_col].insert(ins_row, "...")
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i], strict=True):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
row.insert(
self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
)
self.write_tr(
row,
indent,
self.indent_delta,
tags=tags,
nindex_levels=len(levels) - sparse_offset,
)
else:
row = []
for i in range(len(frame)):
if is_truncated_vertically and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
idx_values = list(
zip(
*frame.index._format_multi(sparsify=False, include_names=False),
strict=True,
)
)
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(self.ncols))
if is_truncated_horizontally:
row.insert(self.row_levels + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=None,
nindex_levels=frame.index.nlevels,
)
| HTMLFormatter |
python | wandb__wandb | wandb/sdk/lib/filesystem.py | {
"start": 2955,
"end": 3541
} | class ____:
"""Wrapper for a file object that serializes writes."""
def __init__(self, f: BinaryIO) -> None:
self.lock = threading.Lock()
self.f = f
def write(self, *args, **kargs) -> None: # type: ignore
self.lock.acquire()
try:
self.f.write(*args, **kargs)
self.f.flush()
finally:
self.lock.release()
def close(self) -> None:
self.lock.acquire() # wait for pending writes
try:
self.f.close()
finally:
self.lock.release()
| WriteSerializingFile |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py | {
"start": 721,
"end": 1303
} | class ____(BaseModel, alias_generator=None, frozen=True, extra=Extra.forbid):
x: int
y: str
def method(self) -> None:
pass
kwargs_model = KwargsModel(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "KwargsModel" [call-arg]
kwargs_model = KwargsModel(x=1)
# MYPY: error: Missing named argument "y" for "KwargsModel" [call-arg]
kwargs_model.y = 'a'
# MYPY: error: Property "y" defined in "KwargsModel" is read-only [misc]
KwargsModel.from_orm({})
# MYPY: error: "KwargsModel" does not have from_attributes=True [pydantic-orm]
| KwargsModel |
python | ray-project__ray | rllib/utils/exploration/thompson_sampling.py | {
"start": 336,
"end": 1490
} | class ____(Exploration):
@override(Exploration)
def get_exploration_action(
self,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True,
):
if self.framework == "torch":
return self._get_torch_exploration_action(action_distribution, explore)
elif self.framework == "tf2":
return self._get_tf_exploration_action(action_distribution, explore)
else:
raise NotImplementedError
def _get_torch_exploration_action(self, action_dist, explore):
if explore:
return action_dist.inputs.argmax(dim=-1), None
else:
scores = self.model.predict(self.model.current_obs())
return scores.argmax(dim=-1), None
def _get_tf_exploration_action(self, action_dist, explore):
action = tf.argmax(
tf.cond(
pred=explore,
true_fn=lambda: action_dist.inputs,
false_fn=lambda: self.model.predict(self.model.current_obs()),
),
axis=-1,
)
return action, None
| ThompsonSampling |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 2573,
"end": 2748
} | class ____(Generic[P, R], _Object[P, R, MetadataReturn]):
"""A single Weaviate object returned by a query within the `.query` namespace of a collection."""
@dataclass
| Object |
python | pytorch__pytorch | torch/_inductor/standalone_compile.py | {
"start": 3686,
"end": 9864
} | class ____(CompiledArtifact):
"""
CompiledArtifact that depends on torch.compiler.save_cache_artifacts
"""
CACHE_HEADER = bytes("CacheCompiledArtifact", "utf-8")
def __init__(
self,
compiled_fn: Callable[..., Any],
artifacts: Optional[tuple[bytes, CacheInfo]],
):
self._compiled_fn = compiled_fn
self._artifacts = artifacts
def __call__(self, *args: Any) -> Any:
return self._compiled_fn(*args)
def save(
self, *, path: str, format: Literal["binary", "unpacked"] = "binary"
) -> None:
with dynamo_timed("CompiledArtifact.save"):
if self._artifacts is None:
raise RuntimeError(
"CompiledArtifact.save failed to save since there's no artifact to save"
)
artifact_bytes, cache_info = self._artifacts
assert len(cache_info.aot_autograd_artifacts) == 1, cache_info
key = cache_info.aot_autograd_artifacts[0]
if format == "binary":
# can't assert that it is a file since it might not exist yet
assert not os.path.isdir(path)
from torch.utils._appending_byte_serializer import BytesWriter
from .codecache import torch_key
writer = BytesWriter()
writer.write_bytes(CacheCompiledArtifact.CACHE_HEADER)
writer.write_bytes(torch_key())
writer.write_str(key)
writer.write_bytes(artifact_bytes)
from torch._inductor.codecache import write_atomic
write_atomic(path, writer.to_bytes())
else:
assert format == "unpacked"
if os.path.exists(path):
assert os.path.isdir(path)
shutil.rmtree(path, ignore_errors=True)
from .codecache import FxGraphCache
with temporary_cache_dir(path):
# This function unpacks the cache artifacts to disk
loaded_cache_info = torch.compiler.load_cache_artifacts(
artifact_bytes
)
assert loaded_cache_info is not None
# Now write all the output_code artifacts to disk so that
# they can be inspected and modified
for key in loaded_cache_info.inductor_artifacts:
subdir = FxGraphCache._get_tmp_dir_for_key(key)
assert os.path.exists(subdir)
for path in sorted(os.listdir(subdir)):
with open(os.path.join(subdir, path), "rb") as f:
graph = pickle.load(f)
output_file = graph.write_to_disk()
log.info("Output code written to: %s", output_file)
@staticmethod
def _load_impl(
cache_dir_ctx: AbstractContextManager[Any], key: str
) -> CompiledArtifact:
with (
cache_dir_ctx,
config.patch(unsafe_skip_cache_dynamic_shape_guards=True),
):
with torch._functorch.config.patch(strict_autograd_cache=True):
from torch._functorch._aot_autograd.autograd_cache import (
AOTAutogradCache,
)
result = AOTAutogradCache._lookup(
key,
local=True,
remote=False,
args=[],
cache_info={},
aot_config=None,
)
assert result is not None
(entry, _) = result
from .compile_fx import _CompileFxKwargs
fx_config = _CompileFxKwargs(
cudagraphs=BoxedBool(False),
boxed_forward_device_index=BoxedDeviceIndex(0),
)
context = torch._guards.TracingContext(FakeTensorMode(shape_env=ShapeEnv()))
with torch._guards.tracing(context):
compiled_fn = entry.wrap_post_compile(
[], entry.sanitized_aot_config, fx_config
)
return CacheCompiledArtifact(lambda *args: compiled_fn(list(args)), None)
@staticmethod
def _prepare_load(
*, path: str, format: Literal["binary", "unpacked"] = "binary"
) -> tuple[str, AbstractContextManager[Any]]:
"""
Do format specific prep and loads, return a context manager and key
"""
path = normalize_path_separator(path)
with dynamo_timed("CompiledArtifact.load"):
if format == "binary":
# can't assert that it is a file since it might not exist yet
assert not os.path.isdir(path)
with open(path, "rb") as file:
artifacts = file.read()
from torch.utils._appending_byte_serializer import BytesReader
from .codecache import torch_key
reader = BytesReader(artifacts)
assert reader.read_bytes() == torch_key()
key = reader.read_str()
artifact_bytes = reader.read_bytes()
assert reader.is_finished()
torch.compiler.load_cache_artifacts(artifact_bytes)
return key, nullcontext()
else:
assert format == "unpacked"
assert os.path.isdir(path)
autograd_cache_dir = os.path.join(path, "aotautograd")
assert os.path.isdir(autograd_cache_dir)
files = list(os.listdir(autograd_cache_dir))
assert len(files) == 1
key = files[0]
cache_dir_ctx = temporary_cache_dir(path)
return key, cache_dir_ctx
@staticmethod
def load(
*, path: str, format: Literal["binary", "unpacked"] = "binary"
) -> CompiledArtifact:
key, cache_dir_ctx = CacheCompiledArtifact._prepare_load(
path=path, format=format
)
return CacheCompiledArtifact._load_impl(cache_dir_ctx, key)
| CacheCompiledArtifact |
python | apache__airflow | airflow-core/tests/unit/always/test_connection.py | {
"start": 2252,
"end": 2983
} | class ____:
def __init__(
self,
test_conn_uri: str,
test_conn_attributes: dict,
description: str,
):
"""
:param test_conn_uri: URI that we use to create connection
:param test_conn_attributes: we expect a connection object created with `test_uri` to have these
attributes
:param description: human-friendly name appended to parameterized test
"""
self.test_uri = test_conn_uri
self.test_conn_attributes = test_conn_attributes
self.description = description
@staticmethod
def uri_test_name(func, num, param):
return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}"
| UriTestCaseConfig |
python | sqlalchemy__sqlalchemy | test/sql/test_functions.py | {
"start": 55320,
"end": 56640
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def setup_test(self):
self._registry = deepcopy(functions._registry)
def teardown_test(self):
functions._registry = self._registry
def test_GenericFunction_is_registered(self):
assert "GenericFunction" not in functions._registry["_default"]
def test_register_function(self):
# test generic function registering
class registered_func(GenericFunction):
_register = True
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, *args, **kwargs)
class registered_func_child(registered_func):
type = sqltypes.Integer
assert "registered_func" in functions._registry["_default"]
assert isinstance(func.registered_func_child().type, Integer)
class not_registered_func(GenericFunction):
_register = False
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, *args, **kwargs)
class not_registered_func_child(not_registered_func):
type = sqltypes.Integer
assert "not_registered_func" not in functions._registry["_default"]
assert isinstance(func.not_registered_func_child().type, Integer)
| RegisterTest |
python | pydantic__pydantic | tests/typechecking/fields.py | {
"start": 290,
"end": 2146
} | class ____(BaseModel):
# `default` and `default_factory` are mutually exclusive:
f1: int = Field(default=1, default_factory=int) # type: ignore[call-overload] # pyright: ignore[reportCallIssue]
# `default` and `default_factory` matches the annotation:
f2: int = Field(default='1') # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
f3: int = Field(default_factory=str) # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
f4: int = PrivateAttr(default='1') # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
f5: int = PrivateAttr(default_factory=str) # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
f6: list[str] = Field(default_factory=list)
f7: list[int] = Field(default_factory=new_list)
f8: list[str] = Field(default_factory=lambda: list())
f9: dict[str, str] = Field(default_factory=dict)
f10: int = Field(default_factory=lambda: 123)
# Note: mypy may require a different error code for `f12` (see https://github.com/python/mypy/issues/17986).
f11: list[str] = Field(default_factory=new_list) # type: ignore[arg-type] # pyright: ignore[reportAssignmentType]
f12: int = Field(default_factory=list) # type: ignore[arg-type, assignment, unused-ignore] # pyright: ignore[reportAssignmentType]
# Do not error on the ellipsis:
f13: int = Field(...)
# Do not error for invalid assignments when validate_default=True
f14: int = Field(default='1', validate_default=True)
f15: int = Field(default_factory=str, validate_default=True)
f16: int = Field(default='1', validate_default=False) # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
f17: int = Field(default_factory=str, validate_default=False) # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
| Model |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 640755,
"end": 670305
} | class ____(
FieldChannelMixin, core.FieldOrDatumDefWithConditionMarkPropFieldDefnumberArray
):
r"""
StrokeDash schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeDash"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> StrokeDash: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> StrokeDash: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> StrokeDash: ...
@overload
def bandPosition(self, _: float, /) -> StrokeDash: ...
@overload
def bin(self, _: bool | Bin | None, /) -> StrokeDash: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> StrokeDash: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
) -> StrokeDash: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
) -> StrokeDash: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberArrayExprRef], /
) -> StrokeDash: ...
@overload
def field(self, _: str | RepeatRef, /) -> StrokeDash: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> StrokeDash: ...
@overload
def legend(self, _: Legend | None, /) -> StrokeDash: ...
@overload
def legend(
self,
*,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
clipHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columnPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
columns: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
direction: Optional[SchemaBase | Orientation_T] = Undefined,
fillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
format: Optional[str | SchemaBase | Map] = Undefined,
formatType: Optional[str] = Undefined,
gradientLength: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
gradientStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gradientThickness: Optional[float | Parameter | SchemaBase | Map] = Undefined,
gridAlign: Optional[Parameter | SchemaBase | Map | LayoutAlign_T] = Undefined,
labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
labelBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
labelColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
labelExpr: Optional[str] = Undefined,
labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
labelFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelOverlap: Optional[
bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
] = Undefined,
labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
legendY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
orient: Optional[SchemaBase | LegendOrient_T] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
rowPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
symbolDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolFillColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolStrokeColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
symbolStrokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
symbolType: Optional[str | Parameter | SchemaBase | Map] = Undefined,
tickCount: Optional[
float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
titleBaseline: Optional[
Parameter | SchemaBase | Map | TextBaseline_T
] = Undefined,
titleColor: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
titleFontWeight: Optional[
Parameter | SchemaBase | Map | FontWeight_T
] = Undefined,
titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
titleOrient: Optional[Parameter | SchemaBase | Map | Orient_T] = Undefined,
titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
type: Optional[Literal["symbol", "gradient"]] = Undefined,
values: Optional[
Parameter
| SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
] = Undefined,
zindex: Optional[float] = Undefined,
) -> StrokeDash: ...
@overload
def scale(self, _: Scale | None, /) -> StrokeDash: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> StrokeDash: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> StrokeDash: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> StrokeDash: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> StrokeDash: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> StrokeDash: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> StrokeDash: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> StrokeDash: ...
@overload
def type(self, _: StandardType_T, /) -> StrokeDash: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
| StrokeDash |
python | yandexdataschool__Practical_RL | week07_seq2seq/basic_model_tf.py | {
"start": 766,
"end": 9656
} | class ____:
def __init__(self, name, inp_voc, out_voc,
emb_size, hid_size,):
self.name = name
self.inp_voc = inp_voc
self.out_voc = out_voc
with tf.variable_scope(name):
self.emb_inp = L.Embedding(len(inp_voc), emb_size)
self.emb_out = L.Embedding(len(out_voc), emb_size)
self.enc0 = tf.nn.rnn_cell.GRUCell(hid_size)
self.dec_start = L.Dense(hid_size)
self.dec0 = tf.nn.rnn_cell.GRUCell(hid_size)
self.logits = L.Dense(len(out_voc))
# run on dummy output to .build all layers (and therefore create
# weights)
inp = tf.placeholder('int32', [None, None])
out = tf.placeholder('int32', [None, None])
h0 = self.encode(inp)
h1 = self.decode(h0, out[:, 0])
# h2 = self.decode(h1,out[:,1]) etc.
self.weights = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
def encode(self, inp, **flags):
"""
Takes symbolic input sequence, computes initial state
:param inp: matrix of input tokens [batch, time]
:return: a list of initial decoder state tensors
"""
inp_lengths = infer_length(inp, self.inp_voc.eos_ix)
inp_emb = self.emb_inp(inp)
_, enc_last = tf.nn.dynamic_rnn(
self.enc0, inp_emb,
sequence_length=inp_lengths,
dtype=inp_emb.dtype)
dec_start = self.dec_start(enc_last)
return [dec_start]
def decode(self, prev_state, prev_tokens, **flags):
"""
Takes previous decoder state and tokens, returns new state and logits
:param prev_state: a list of previous decoder state tensors
:param prev_tokens: previous output tokens, an int vector of [batch_size]
:return: a list of next decoder state tensors, a tensor of logits [batch,n_tokens]
"""
[prev_dec] = prev_state
prev_emb = self.emb_out(prev_tokens[:, None])[:, 0]
new_dec_out, new_dec_state = self.dec0(prev_emb, prev_dec)
output_logits = self.logits(new_dec_out)
return [new_dec_state], output_logits
def symbolic_score(self, inp, out, eps=1e-30, **flags):
"""
Takes symbolic int32 matrices of hebrew words and their english translations.
Computes the log-probabilities of all possible english characters given english prefices and hebrew word.
:param inp: input sequence, int32 matrix of shape [batch,time]
:param out: output sequence, int32 matrix of shape [batch,time]
:return: log-probabilities of all possible english characters of shape [bath,time,n_tokens]
NOTE: log-probabilities time axis is synchronized with out
In other words, logp are probabilities of __current__ output at each tick, not the next one
therefore you can get likelihood as logprobas * tf.one_hot(out,n_tokens)
"""
first_state = self.encode(inp, **flags)
batch_size = tf.shape(inp)[0]
bos = tf.fill([batch_size], self.out_voc.bos_ix)
first_logits = tf.log(tf.one_hot(bos, len(self.out_voc)) + eps)
def step(blob, y_prev):
h_prev = blob[:-1]
h_new, logits = self.decode(h_prev, y_prev, **flags)
return list(h_new) + [logits]
results = tf.scan(step, initializer=list(first_state) + [first_logits],
elems=tf.transpose(out))
# gather state and logits, each of shape [time,batch,...]
states_seq, logits_seq = results[:-1], results[-1]
# add initial state and logits
logits_seq = tf.concat((first_logits[None], logits_seq), axis=0)
# convert from [time,batch,...] to [batch,time,...]
logits_seq = tf.transpose(logits_seq, [1, 0, 2])
return tf.nn.log_softmax(logits_seq)
def symbolic_translate(
self,
inp,
greedy=False,
max_len=None,
eps=1e-30,
**flags):
"""
takes symbolic int32 matrix of hebrew words, produces output tokens sampled
from the model and output log-probabilities for all possible tokens at each tick.
:param inp: input sequence, int32 matrix of shape [batch,time]
:param greedy: if greedy, takes token with highest probablity at each tick.
Otherwise samples proportionally to probability.
:param max_len: max length of output, defaults to 2 * input length
:return: output tokens int32[batch,time] and
log-probabilities of all tokens at each tick, [batch,time,n_tokens]
"""
first_state = self.encode(inp, **flags)
batch_size = tf.shape(inp)[0]
bos = tf.fill([batch_size], self.out_voc.bos_ix)
first_logits = tf.log(tf.one_hot(bos, len(self.out_voc)) + eps)
max_len = tf.reduce_max(tf.shape(inp)[1]) * 2
def step(blob, t):
h_prev, y_prev = blob[:-2], blob[-1]
h_new, logits = self.decode(h_prev, y_prev, **flags)
y_new = (
tf.argmax(logits, axis=-1) if greedy
else tf.multinomial(logits, 1)[:, 0]
)
return list(h_new) + [logits, tf.cast(y_new, y_prev.dtype)]
results = tf.scan(
step,
initializer=list(first_state) + [first_logits, bos],
elems=[tf.range(max_len)],
)
# gather state, logits and outs, each of shape [time,batch,...]
states_seq, logits_seq, out_seq = (
results[:-2], results[-2], results[-1]
)
# add initial state, logits and out
logits_seq = tf.concat((first_logits[None], logits_seq), axis=0)
out_seq = tf.concat((bos[None], out_seq), axis=0)
states_seq = [
tf.concat((init[None], states), axis=0)
for init, states in zip(first_state, states_seq)
]
# convert from [time,batch,...] to [batch,time,...]
logits_seq = tf.transpose(logits_seq, [1, 0, 2])
out_seq = tf.transpose(out_seq)
states_seq = [
tf.transpose(states, [1, 0] + list(range(2, states.shape.ndims)))
for states in states_seq
]
return out_seq, tf.nn.log_softmax(logits_seq)
### Utility functions ###
def initialize_uninitialized(sess=None):
"""
Initialize unitialized variables, doesn't affect those already initialized
:param sess: in which session to initialize stuff. Defaults to tf.get_default_session()
"""
sess = sess or tf.get_default_session()
global_vars = tf.global_variables()
is_not_initialized = sess.run(
[tf.is_variable_initialized(var) for var in global_vars]
)
not_initialized_vars = [
v for (v, f)
in zip(global_vars, is_not_initialized)
if not f
]
if len(not_initialized_vars):
sess.run(tf.variables_initializer(not_initialized_vars))
def infer_length(seq, eos_ix, time_major=False, dtype=tf.int32):
"""
compute length given output indices and eos code
:param seq: tf matrix [time,batch] if time_major else [batch,time]
:param eos_ix: integer index of end-of-sentence token
:returns: lengths, int32 vector of shape [batch]
"""
axis = 0 if time_major else 1
is_eos = tf.cast(tf.equal(seq, eos_ix), dtype)
count_eos = tf.cumsum(is_eos, axis=axis, exclusive=True)
lengths = tf.reduce_sum(tf.cast(tf.equal(count_eos, 0), dtype), axis=axis)
return lengths
def infer_mask(seq, eos_ix, time_major=False, dtype=tf.float32):
"""
compute mask given output indices and eos code
:param seq: tf matrix [time,batch] if time_major else [batch,time]
:param eos_ix: integer index of end-of-sentence token
:returns: mask, float32 matrix with '0's and '1's of same shape as seq
"""
axis = 0 if time_major else 1
lengths = infer_length(seq, eos_ix, time_major=time_major)
mask = tf.sequence_mask(lengths, maxlen=tf.shape(seq)[axis], dtype=dtype)
if time_major:
mask = tf.transpose(mask)
return mask
def select_values_over_last_axis(values, indices):
"""
Auxiliary function to select logits corresponding to chosen tokens.
:param values: logits for all actions: float32[batch,tick,action]
:param indices: action ids int32[batch,tick]
:returns: values selected for the given actions: float[batch,tick]
"""
assert values.shape.ndims == 3 and indices.shape.ndims == 2
batch_size, seq_len = tf.shape(indices)[0], tf.shape(indices)[1]
batch_i = tf.tile(tf.range(0, batch_size)[:, None], [1, seq_len])
time_i = tf.tile(tf.range(0, seq_len)[None, :], [batch_size, 1])
indices_nd = tf.stack([batch_i, time_i, indices], axis=-1)
return tf.gather_nd(values, indices_nd)
| BasicTranslationModel |
python | astropy__astropy | astropy/uncertainty/tests/test_distribution.py | {
"start": 601,
"end": 2219
} | class ____:
@classmethod
def setup_class(cls):
cls.rates = np.array([1, 5, 30, 400])[:, np.newaxis]
cls.parr = np.random.poisson(cls.rates, (4, 1000))
cls.parr_t = np.random.poisson(cls.rates.squeeze(), (1000, 4))
def test_numpy_init(self):
# Test that we can initialize directly from a Numpy array
Distribution(self.parr)
def test_numpy_init_T(self):
Distribution(self.parr_t.T)
def test_quantity_init(self):
# Test that we can initialize directly from a Quantity
pq = self.parr << u.ct
pqd = Distribution(pq)
assert isinstance(pqd, u.Quantity)
assert isinstance(pqd, Distribution)
assert isinstance(pqd.value, Distribution)
assert_array_equal(pqd.value.distribution, self.parr)
def test_quantity_init_T(self):
# Test that we can initialize directly from a Quantity
pq = self.parr_t << u.ct
Distribution(pq.T)
def test_quantity_init_with_distribution(self):
# Test that we can initialize a Quantity from a Distribution.
pd = Distribution(self.parr)
qpd = pd << u.ct
assert isinstance(qpd, u.Quantity)
assert isinstance(qpd, Distribution)
assert qpd.unit == u.ct
assert_array_equal(qpd.value.distribution, pd.distribution.astype(float))
def test_init_scalar():
parr = np.random.poisson(np.array([1, 5, 30, 400])[:, np.newaxis], (4, 1000))
with pytest.raises(
TypeError, match=r"Attempted to initialize a Distribution with a scalar"
):
Distribution(parr.ravel()[0])
| TestInit |
python | jackfrued__Python-100-Days | 公开课/年薪50W+的Python程序员如何写代码/code/Python/opencourse/part03/example.py | {
"start": 453,
"end": 1106
} | class ____:
"""扑克"""
def __init__(self):
self.cards = [Card(suite, face) for suite in Suite
for face in range(1, 14)]
self.current = 0
def shuffle(self):
"""洗牌"""
self.current = 0
random.shuffle(self.cards)
def deal(self):
"""发牌"""
card = self.cards[self.current]
self.current += 1
return card
@property
def has_next(self):
"""还有没有牌可以发"""
return self.current < len(self.cards)
def main():
"""主函数(程序入口)"""
poker = Poker()
poker.shuffle()
print(poker.cards)
if __name__ == '__main__':
main()
| Poker |
python | pytorch__pytorch | torch/_inductor/dependencies.py | {
"start": 21114,
"end": 28837
} | class ____(V.KernelFormatterHandler): # type: ignore[name-defined]
def __init__(self, var_ranges: VarRanges, normalize: bool) -> None:
parent_handler = _RecordLoadStoreInner(
var_ranges=var_ranges, normalize=normalize
)
super().__init__(parent_handler=parent_handler)
# TODO: check call sites
def var_builder(prefix: str) -> tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
cnt = itertools.count()
var_ranges: VarRanges = {}
def add_var(length: sympy.Expr) -> sympy.Symbol:
v = sympy_index_symbol(f"{prefix}{next(cnt)}")
var_ranges[v] = length
return v
return var_ranges, add_var
def index_vars_no_squeeze(
*argsizes: Sequence[sympy.Expr], prefix: str
) -> tuple[list[list[sympy.Symbol]], VarRanges]:
var_ranges, add_var = var_builder(prefix)
args: list[list[sympy.Symbol]] = [list(map(add_var, size)) for size in argsizes]
return args, var_ranges
def index_vars_squeeze(
*argsizes: Sequence[sympy.Expr], prefix: str = "d"
) -> tuple[list[Sequence[sympy.Expr]], VarRanges]:
from .ir import SqueezeView
var_ranges, add_var = var_builder(prefix)
args: list[Sequence[sympy.Expr]] = []
new_sizes: list[Sequence[sympy.Expr]] = []
for size in argsizes:
new_size, reindex = SqueezeView.squeezer(size)
new_sizes.append(new_size)
args.append(reindex(list(map(add_var, new_size))))
return args, var_ranges
def extract_read_writes(
fn: Callable[..., Any],
*argsizes: Sequence[sympy.Expr],
normalize: bool = False,
prefix: str = "d",
hidden_args: Sequence[list[sympy.Expr]] = (),
) -> ReadWrites:
args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)
from .loop_body import LoopBody
if isinstance(fn, LoopBody):
inner = extract_loop_body_with_args(
fn,
[*args, *hidden_args], # type: ignore[list-item]
var_ranges,
normalize,
)
else:
# Slow path tracing the function
rw = RecordLoadStore(var_ranges, normalize=normalize)
with V.set_ops_handler(rw):
fn(*args, *hidden_args)
inner = rw.parent_handler
if normalize:
range_vars = [] # Number of vars could differ due to normalization
else:
range_vars = [*itertools.chain.from_iterable(args)]
return ReadWrites(
# pyrefly: ignore [missing-attribute]
OrderedSet(inner._reads),
# pyrefly: ignore [missing-attribute]
OrderedSet(inner._writes),
# pyrefly: ignore [missing-attribute]
inner._index_exprs,
range_vars,
var_ranges,
)
def extract_loop_body_with_args(
fn: Any,
args: list[list[sympy.Expr]],
var_ranges: VarRanges,
normalize: bool = False,
) -> _RecordLoadStoreInner:
from .loop_body import MemoryUsageType
# Fast path to avoid tracing when we already have a LoopBody
inner = _RecordLoadStoreInner(var_ranges=var_ranges, normalize=normalize)
name_to_index = fn.indexing_from_args(args)
if fn.indirect_vars:
# mimic the `tmpX` naming tracing gives us
repl = {v: make_symbol(SymT.TMP, i) for i, v in enumerate(fn.indirect_vars)}
name_to_index = {k: sympy_subs(v, repl) for k, v in name_to_index.items()} # type: ignore[arg-type]
for entry in fn.memory_usage[MemoryUsageType.LOAD]:
inner.load(entry.buffer_name, name_to_index[entry.index_name]) # type: ignore[arg-type]
for entry in fn.memory_usage[MemoryUsageType.LOAD_SEED]:
inner.load_seed(entry.buffer_name, int(name_to_index[entry.index_name])) # type: ignore[arg-type]
for entry in fn.memory_usage[MemoryUsageType.STORE]:
inner.store(
entry.buffer_name,
name_to_index[entry.index_name],
None, # type: ignore[arg-type]
entry.mode,
)
for entry in fn.memory_usage[MemoryUsageType.STORE_REDUCTION]:
inner.store_reduction(
entry.buffer_name,
name_to_index[entry.index_name],
None, # type: ignore[arg-type]
)
for entry in fn.memory_usage[MemoryUsageType.INDEX_EXPR]:
inner.index_expr(name_to_index[entry.index_name], None)
for entry in fn.memory_usage[MemoryUsageType.BUCKETIZE]:
# All that matters is that we record the buffer name, so place it in the
# "boundaries" name position to ensure that it's recorded.
inner.bucketize(
None,
(entry.buffer_name, None, None, None),
None,
None, # type: ignore[arg-type]
None, # type: ignore[arg-type]
)
# fn.memory_usage[MemoryUsageType.CHECK_BOUNDS] intentionally skipped
return inner
def extract_input_node_reduction_ranges(
input_node: "torch._inductor.ir.IRNode",
) -> tuple[Optional[list[sympy.Expr]], Optional[list[sympy.Expr]]]:
"""
Returns the size and reduction size of all inputs, if the sizes and reduction_sizes (if exist) are all the same.
It's possible that a node has multiple inputs, some are Reduction nodes and others are Pointwise nodes.
In this case, reduction_sizes of the Reduction nodes need to be the same.
Otherwise returns (None, None).
"""
from .ir import ComputedBuffer, ExternKernel, Loops
size: Optional[list[sympy.Expr]]
reduction_size: Optional[list[sympy.Expr]]
if isinstance(input_node.get_defining_op(), ComputedBuffer):
# Input node has already been realized. Return its size and reduction_size.
size = [*input_node.get_size()]
reduction_size = [*input_node.get_reduction_size()]
if len(reduction_size) > 0:
return (size, reduction_size)
else:
return (None, None)
if not isinstance(input_node.data.data, Loops): # type: ignore[attr-defined]
# Other IRNodes do not have reduction_ranges.
return (None, None)
# There is one issue: what if there are views / permutations between the input node and its dependent realized nodes?
# The current method still uses reduction ranges from the dependent realized node, which is not ideal.
# Is there a way to check whether there are permutations in between?
reads = input_node.get_reads()
reduction_size: Optional[list[sympy.Expr]] = None
size: Optional[list[sympy.Expr]] = None
while reduction_size is None and len(reads) > 0:
seen: OrderedSet[str] = OrderedSet()
new_reads: list[Dep] = []
for read in reads:
if not isinstance(read, MemoryDep):
continue
if read.name in seen:
continue
seen.add(read.name)
buffer = V.graph.try_get_buffer(read.name)
if buffer is None:
continue
op = buffer.get_defining_op()
if op is None or isinstance(op, ExternKernel):
continue
if isinstance(op, ComputedBuffer) and len(op.get_reduction_size()) > 0:
if reduction_size is None:
reduction_size = [*op.get_reduction_size()]
size = [*op.get_size()]
elif reduction_size != [*op.get_reduction_size()] or size != [
*op.get_size()
]:
return (None, None)
else:
new_reads.extend(op.get_reads())
if reads == new_reads:
return (size, reduction_size)
else:
reads = OrderedSet(new_reads)
return (size, reduction_size)
def canonicalization_prefix() -> str:
return "c"
# ops handler which computes all the free symbols for an IR
| RecordLoadStore |
python | ipython__ipython | IPython/testing/tools.py | {
"start": 7773,
"end": 9029
} | class ____(unittest.TestCase):
"""Utility class to create temporary Python/IPython files.
Meant as a mixin class for test cases."""
def mktmp(self, src, ext='.py'):
"""Make a valid python temp file."""
fname = temp_pyfile(src, ext)
if not hasattr(self, 'tmps'):
self.tmps=[]
self.tmps.append(fname)
self.fname = fname
def tearDown(self):
# If the tmpfile wasn't made because of skipped tests, like in
# win32, there's nothing to cleanup.
if hasattr(self, 'tmps'):
for fname in self.tmps:
# If the tmpfile wasn't made because of skipped tests, like in
# win32, there's nothing to cleanup.
try:
os.unlink(fname)
except:
# On Windows, even though we close the file, we still can't
# delete it. I have no clue why
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.tearDown()
MyStringIO = StringIO
_re_type = type(re.compile(r''))
notprinted_msg = """Did not find {0!r} in printed output (on {1}):
-------
{2!s}
-------
"""
| TempFileMixin |
python | ray-project__ray | python/ray/tune/stopper/trial_plateau.py | {
"start": 199,
"end": 3332
} | class ____(Stopper):
"""Early stop single trials when they reached a plateau.
When the standard deviation of the `metric` result of a trial is
below a threshold `std`, the trial plateaued and will be stopped
early.
Args:
metric: Metric to check for convergence.
std: Maximum metric standard deviation to decide if a
trial plateaued. Defaults to 0.01.
num_results: Number of results to consider for stdev
calculation.
grace_period: Minimum number of timesteps before a trial
can be early stopped
metric_threshold (Optional[float]):
Minimum or maximum value the result has to exceed before it can
be stopped early.
mode: If a `metric_threshold` argument has been
passed, this must be one of [min, max]. Specifies if we optimize
for a large metric (max) or a small metric (min). If max, the
`metric_threshold` has to be exceeded, if min the value has to
be lower than `metric_threshold` in order to early stop.
"""
def __init__(
self,
metric: str,
std: float = 0.01,
num_results: int = 4,
grace_period: int = 4,
metric_threshold: Optional[float] = None,
mode: Optional[str] = None,
):
self._metric = metric
self._mode = mode
self._std = std
self._num_results = num_results
self._grace_period = grace_period
self._metric_threshold = metric_threshold
if self._metric_threshold:
if mode not in ["min", "max"]:
raise ValueError(
f"When specifying a `metric_threshold`, the `mode` "
f"argument has to be one of [min, max]. "
f"Got: {mode}"
)
self._iter = defaultdict(lambda: 0)
self._trial_results = defaultdict(lambda: deque(maxlen=self._num_results))
def __call__(self, trial_id: str, result: Dict):
metric_result = result.get(self._metric)
self._trial_results[trial_id].append(metric_result)
self._iter[trial_id] += 1
# If still in grace period, do not stop yet
if self._iter[trial_id] < self._grace_period:
return False
# If not enough results yet, do not stop yet
if len(self._trial_results[trial_id]) < self._num_results:
return False
# If metric threshold value not reached, do not stop yet
if self._metric_threshold is not None:
if self._mode == "min" and metric_result > self._metric_threshold:
return False
elif self._mode == "max" and metric_result < self._metric_threshold:
return False
# Calculate stdev of last `num_results` results
try:
current_std = np.std(self._trial_results[trial_id])
except Exception:
current_std = float("inf")
# If stdev is lower than threshold, stop early.
return current_std < self._std
def stop_all(self):
return False
| TrialPlateauStopper |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_base.py | {
"start": 80123,
"end": 86531
} | class ____(_DeclarativeMapperConfig):
"""Configurator that extends _DeclarativeMapperConfig to add a
"deferred" step, to allow extensions like AbstractConcreteBase,
DeferredMapping to partially set up a mapping that is "prepared"
when table metadata is ready.
"""
_cls: weakref.ref[Type[Any]]
is_deferred = True
_configs: util.OrderedDict[
weakref.ref[Type[Any]], _DeferredDeclarativeConfig
] = util.OrderedDict()
def _early_mapping(self, mapper_kw: _MapperKwArgs) -> None:
pass
@property
def cls(self) -> Type[Any]:
return self._cls() # type: ignore
@cls.setter
def cls(self, class_: Type[Any]) -> None:
self._cls = weakref.ref(class_, self._remove_config_cls)
self._configs[self._cls] = self
@classmethod
def _remove_config_cls(cls, ref: weakref.ref[Type[Any]]) -> None:
cls._configs.pop(ref, None)
@classmethod
def has_cls(cls, class_: Type[Any]) -> bool:
# 2.6 fails on weakref if class_ is an old style class
return isinstance(class_, type) and weakref.ref(class_) in cls._configs
@classmethod
def raise_unmapped_for_cls(cls, class_: Type[Any]) -> NoReturn:
if hasattr(class_, "_sa_raise_deferred_config"):
class_._sa_raise_deferred_config()
raise orm_exc.UnmappedClassError(
class_,
msg=(
f"Class {orm_exc._safe_cls_name(class_)} has a deferred "
"mapping on it. It is not yet usable as a mapped class."
),
)
@classmethod
def config_for_cls(cls, class_: Type[Any]) -> _DeferredDeclarativeConfig:
return cls._configs[weakref.ref(class_)]
@classmethod
def classes_for_base(
cls, base_cls: Type[Any], sort: bool = True
) -> List[_DeferredDeclarativeConfig]:
classes_for_base = [
m
for m, cls_ in [(m, m.cls) for m in cls._configs.values()]
if cls_ is not None and issubclass(cls_, base_cls)
]
if not sort:
return classes_for_base
all_m_by_cls = {m.cls: m for m in classes_for_base}
tuples: List[
Tuple[_DeferredDeclarativeConfig, _DeferredDeclarativeConfig]
] = []
for m_cls in all_m_by_cls:
tuples.extend(
(all_m_by_cls[base_cls], all_m_by_cls[m_cls])
for base_cls in m_cls.__bases__
if base_cls in all_m_by_cls
)
return list(topological.sort(tuples, classes_for_base))
def map(self, mapper_kw: _MapperKwArgs = util.EMPTY_DICT) -> Mapper[Any]:
self._configs.pop(self._cls, None)
return super().map(mapper_kw)
def _add_attribute(
cls: Type[Any], key: str, value: MapperProperty[Any]
) -> None:
"""add an attribute to an existing declarative class.
This runs through the logic to determine MapperProperty,
adds it to the Mapper, adds a column to the mapped Table, etc.
"""
if "__mapper__" in cls.__dict__:
mapped_cls = cast("MappedClassProtocol[Any]", cls)
def _table_or_raise(mc: MappedClassProtocol[Any]) -> Table:
if isinstance(mc.__table__, Table):
return mc.__table__
raise exc.InvalidRequestError(
f"Cannot add a new attribute to mapped class {mc.__name__!r} "
"because it's not mapped against a table."
)
if isinstance(value, Column):
_undefer_column_name(key, value)
_table_or_raise(mapped_cls).append_column(
value, replace_existing=True
)
mapped_cls.__mapper__.add_property(key, value)
elif isinstance(value, _MapsColumns):
mp = value.mapper_property_to_assign
for col, _ in value.columns_to_assign:
_undefer_column_name(key, col)
_table_or_raise(mapped_cls).append_column(
col, replace_existing=True
)
if not mp:
mapped_cls.__mapper__.add_property(key, col)
if mp:
mapped_cls.__mapper__.add_property(key, mp)
elif isinstance(value, MapperProperty):
mapped_cls.__mapper__.add_property(key, value)
elif isinstance(value, QueryableAttribute) and value.key != key:
# detect a QueryableAttribute that's already mapped being
# assigned elsewhere in userland, turn into a synonym()
value = SynonymProperty(value.key)
mapped_cls.__mapper__.add_property(key, value)
else:
type.__setattr__(cls, key, value)
mapped_cls.__mapper__._expire_memoizations()
else:
type.__setattr__(cls, key, value)
def _del_attribute(cls: Type[Any], key: str) -> None:
if (
"__mapper__" in cls.__dict__
and key in cls.__dict__
and not cast(
"MappedClassProtocol[Any]", cls
).__mapper__._dispose_called
):
value = cls.__dict__[key]
if isinstance(
value, (Column, _MapsColumns, MapperProperty, QueryableAttribute)
):
raise NotImplementedError(
"Can't un-map individual mapped attributes on a mapped class."
)
else:
type.__delattr__(cls, key)
cast(
"MappedClassProtocol[Any]", cls
).__mapper__._expire_memoizations()
else:
type.__delattr__(cls, key)
def _declarative_constructor(self: Any, **kwargs: Any) -> None:
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" % (k, cls_.__name__)
)
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = "__init__"
def _undefer_column_name(key: str, column: Column[Any]) -> None:
if column.key is None:
column.key = key
if column.name is None:
column.name = key
| _DeferredDeclarativeConfig |
python | sphinx-doc__sphinx | sphinx/io.py | {
"start": 3429,
"end": 4169
} | class ____(UnfilteredWriter): # type: ignore[type-arg]
"""Dummy writer module used for generating doctree."""
def __init__(self) -> None:
super().__init__()
warnings.warn(
'sphinx.io.SphinxDummyWriter is deprecated',
RemovedInSphinx10Warning,
stacklevel=2,
)
supported = ('html',) # needed to keep "meta" nodes
def translate(self) -> None:
pass
def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:
"""Bypass source object as is to cheat Publisher."""
warnings.warn(
'sphinx.io.SphinxDummySourceClass is deprecated',
RemovedInSphinx10Warning,
stacklevel=2,
)
return source
| SphinxDummyWriter |
python | kubernetes-client__python | kubernetes/client/models/v1_validating_webhook.py | {
"start": 383,
"end": 20181
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'admission_review_versions': 'list[str]',
'client_config': 'AdmissionregistrationV1WebhookClientConfig',
'failure_policy': 'str',
'match_conditions': 'list[V1MatchCondition]',
'match_policy': 'str',
'name': 'str',
'namespace_selector': 'V1LabelSelector',
'object_selector': 'V1LabelSelector',
'rules': 'list[V1RuleWithOperations]',
'side_effects': 'str',
'timeout_seconds': 'int'
}
attribute_map = {
'admission_review_versions': 'admissionReviewVersions',
'client_config': 'clientConfig',
'failure_policy': 'failurePolicy',
'match_conditions': 'matchConditions',
'match_policy': 'matchPolicy',
'name': 'name',
'namespace_selector': 'namespaceSelector',
'object_selector': 'objectSelector',
'rules': 'rules',
'side_effects': 'sideEffects',
'timeout_seconds': 'timeoutSeconds'
}
def __init__(self, admission_review_versions=None, client_config=None, failure_policy=None, match_conditions=None, match_policy=None, name=None, namespace_selector=None, object_selector=None, rules=None, side_effects=None, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
"""V1ValidatingWebhook - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._admission_review_versions = None
self._client_config = None
self._failure_policy = None
self._match_conditions = None
self._match_policy = None
self._name = None
self._namespace_selector = None
self._object_selector = None
self._rules = None
self._side_effects = None
self._timeout_seconds = None
self.discriminator = None
self.admission_review_versions = admission_review_versions
self.client_config = client_config
if failure_policy is not None:
self.failure_policy = failure_policy
if match_conditions is not None:
self.match_conditions = match_conditions
if match_policy is not None:
self.match_policy = match_policy
self.name = name
if namespace_selector is not None:
self.namespace_selector = namespace_selector
if object_selector is not None:
self.object_selector = object_selector
if rules is not None:
self.rules = rules
self.side_effects = side_effects
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def admission_review_versions(self):
"""Gets the admission_review_versions of this V1ValidatingWebhook. # noqa: E501
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
:return: The admission_review_versions of this V1ValidatingWebhook. # noqa: E501
:rtype: list[str]
"""
return self._admission_review_versions
@admission_review_versions.setter
def admission_review_versions(self, admission_review_versions):
"""Sets the admission_review_versions of this V1ValidatingWebhook.
AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. # noqa: E501
:param admission_review_versions: The admission_review_versions of this V1ValidatingWebhook. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and admission_review_versions is None: # noqa: E501
raise ValueError("Invalid value for `admission_review_versions`, must not be `None`") # noqa: E501
self._admission_review_versions = admission_review_versions
@property
def client_config(self):
"""Gets the client_config of this V1ValidatingWebhook. # noqa: E501
:return: The client_config of this V1ValidatingWebhook. # noqa: E501
:rtype: AdmissionregistrationV1WebhookClientConfig
"""
return self._client_config
@client_config.setter
def client_config(self, client_config):
"""Sets the client_config of this V1ValidatingWebhook.
:param client_config: The client_config of this V1ValidatingWebhook. # noqa: E501
:type: AdmissionregistrationV1WebhookClientConfig
"""
if self.local_vars_configuration.client_side_validation and client_config is None: # noqa: E501
raise ValueError("Invalid value for `client_config`, must not be `None`") # noqa: E501
self._client_config = client_config
@property
def failure_policy(self):
"""Gets the failure_policy of this V1ValidatingWebhook. # noqa: E501
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:return: The failure_policy of this V1ValidatingWebhook. # noqa: E501
:rtype: str
"""
return self._failure_policy
@failure_policy.setter
def failure_policy(self, failure_policy):
"""Sets the failure_policy of this V1ValidatingWebhook.
FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail. # noqa: E501
:param failure_policy: The failure_policy of this V1ValidatingWebhook. # noqa: E501
:type: str
"""
self._failure_policy = failure_policy
@property
def match_conditions(self):
"""Gets the match_conditions of this V1ValidatingWebhook. # noqa: E501
MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped # noqa: E501
:return: The match_conditions of this V1ValidatingWebhook. # noqa: E501
:rtype: list[V1MatchCondition]
"""
return self._match_conditions
@match_conditions.setter
def match_conditions(self, match_conditions):
"""Sets the match_conditions of this V1ValidatingWebhook.
MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed. The exact matching logic is (in order): 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. 2. If ALL matchConditions evaluate to TRUE, the webhook is called. 3. If any matchCondition evaluates to an error (but none are FALSE): - If failurePolicy=Fail, reject the request - If failurePolicy=Ignore, the error is ignored and the webhook is skipped # noqa: E501
:param match_conditions: The match_conditions of this V1ValidatingWebhook. # noqa: E501
:type: list[V1MatchCondition]
"""
self._match_conditions = match_conditions
@property
def match_policy(self):
"""Gets the match_policy of this V1ValidatingWebhook. # noqa: E501
matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
:return: The match_policy of this V1ValidatingWebhook. # noqa: E501
:rtype: str
"""
return self._match_policy
@match_policy.setter
def match_policy(self, match_policy):
"""Sets the match_policy of this V1ValidatingWebhook.
matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\". - Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. Defaults to \"Equivalent\" # noqa: E501
:param match_policy: The match_policy of this V1ValidatingWebhook. # noqa: E501
:type: str
"""
self._match_policy = match_policy
@property
def name(self):
"""Gets the name of this V1ValidatingWebhook. # noqa: E501
The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
:return: The name of this V1ValidatingWebhook. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ValidatingWebhook.
The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required. # noqa: E501
:param name: The name of this V1ValidatingWebhook. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace_selector(self):
"""Gets the namespace_selector of this V1ValidatingWebhook. # noqa: E501
:return: The namespace_selector of this V1ValidatingWebhook. # noqa: E501
:rtype: V1LabelSelector
"""
return self._namespace_selector
@namespace_selector.setter
def namespace_selector(self, namespace_selector):
"""Sets the namespace_selector of this V1ValidatingWebhook.
:param namespace_selector: The namespace_selector of this V1ValidatingWebhook. # noqa: E501
:type: V1LabelSelector
"""
self._namespace_selector = namespace_selector
@property
def object_selector(self):
"""Gets the object_selector of this V1ValidatingWebhook. # noqa: E501
:return: The object_selector of this V1ValidatingWebhook. # noqa: E501
:rtype: V1LabelSelector
"""
return self._object_selector
@object_selector.setter
def object_selector(self, object_selector):
"""Sets the object_selector of this V1ValidatingWebhook.
:param object_selector: The object_selector of this V1ValidatingWebhook. # noqa: E501
:type: V1LabelSelector
"""
self._object_selector = object_selector
@property
def rules(self):
"""Gets the rules of this V1ValidatingWebhook. # noqa: E501
Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
:return: The rules of this V1ValidatingWebhook. # noqa: E501
:rtype: list[V1RuleWithOperations]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1ValidatingWebhook.
Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. # noqa: E501
:param rules: The rules of this V1ValidatingWebhook. # noqa: E501
:type: list[V1RuleWithOperations]
"""
self._rules = rules
@property
def side_effects(self):
"""Gets the side_effects of this V1ValidatingWebhook. # noqa: E501
SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
:return: The side_effects of this V1ValidatingWebhook. # noqa: E501
:rtype: str
"""
return self._side_effects
@side_effects.setter
def side_effects(self, side_effects):
"""Sets the side_effects of this V1ValidatingWebhook.
SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. # noqa: E501
:param side_effects: The side_effects of this V1ValidatingWebhook. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and side_effects is None: # noqa: E501
raise ValueError("Invalid value for `side_effects`, must not be `None`") # noqa: E501
self._side_effects = side_effects
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this V1ValidatingWebhook. # noqa: E501
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:return: The timeout_seconds of this V1ValidatingWebhook. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1ValidatingWebhook.
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1ValidatingWebhook. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ValidatingWebhook):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ValidatingWebhook):
return True
return self.to_dict() != other.to_dict()
| V1ValidatingWebhook |
python | google__pytype | pytype/directors/parser.py | {
"start": 948,
"end": 1387
} | class ____:
"""A structured comment.
Attributes:
line: The line number.
tool: The tool label, e.g., "type" for "# type: int".
data: The data, e.g., "int" for "# type: int".
open_ended: True if the comment appears on a line by itself (i.e., it is
open-ended rather than attached to a line of code).
"""
line: int
tool: str
data: str
open_ended: bool
@dataclasses.dataclass(frozen=True)
| _StructuredComment |
python | jina-ai__jina | tests/integration/docarray_v2/csp/SampleColbertExecutor/executor.py | {
"start": 199,
"end": 299
} | class ____(BaseDoc):
text: str = Field(description="The text of the document", default="")
| TextDoc |
python | pytorch__pytorch | torch/_inductor/kernel_inputs.py | {
"start": 6091,
"end": 10821
} | class ____(KernelInputs):
"""
Specialized KernelInputs for matrix multiplication operations.
Provides additional methods to access M, N, K dimensions.
"""
def __init__(
self,
input_nodes: list[Any],
scalars: Optional[dict[str, Union[float, int]]] = None,
out_dtype: Optional[torch.dtype] = None,
mat1_idx: int = -2,
mat2_idx: int = -1,
):
"""
Initialize with a tuple of input nodes.
By default, we assume the last 2 input nodes are mat1 and mat2, but
the caller can adjust when necessary
"""
super().__init__(input_nodes, scalars, out_dtype)
# for mm, we need at least 2 nodes, and we need to know which nodes
# are the main matrixes e.g. addmm is (bias, mat1, mat2) whereas others
# might be (mat1, mat2, scale), etc.
assert len(self._input_nodes) >= 2, "Expected at least 2 input nodes"
# Adjust assertions to handle negative indices
m1_idx, m2_idx = mat1_idx, mat2_idx
if mat1_idx < 0:
m1_idx += len(input_nodes)
if mat2_idx < 0:
m2_idx += len(input_nodes)
assert 0 <= m1_idx < len(input_nodes), f"Invalid mat1_idx: {mat1_idx}"
assert 0 <= m1_idx < len(input_nodes), f"Invalid mat2_idx: {mat2_idx}"
self._mat1_idx = mat1_idx
self._mat2_idx = mat2_idx
def mnk_symbolic(
self,
) -> tuple[sympy.Integer, sympy.Integer, sympy.Integer]:
"""
Get the symbolic M, N, K dimensions for matrix multiplication.
Handles both 2D (MM) and 3D (BMM) tensors.
M is extracted from the second-to-last dimension of the first operand (mat1).
N is extracted from the last dimension of the second operand (mat2).
K is extracted from the last dimension of the first operand (mat1).
Returns:
A tuple of (M, N, K) dimensions
"""
mat1 = self.nodes()[self._mat1_idx]
mat2 = self.nodes()[self._mat2_idx]
m = mat1.get_size()[-2] # M from second-to-last dimension of mat1
k = mat1.get_size()[-1] # K from last dimension of mat1
n = mat2.get_size()[-1] # N from last dimension of mat2
# Ensure K dimensions match between operands
k0 = mat2.get_size()[-2] # K from second-to-last dimension of mat2
V.graph.sizevars.check_equals(k, k0)
return (m, n, k)
def out_dtype(self) -> torch.dtype:
"""
Get the output dtype, whether passed in or inferred from the nodes
Returns:
The output dtype
"""
if self._out_dtype is not None:
return self._out_dtype
return self.mat1mat2()[0].get_dtype()
def output_layout(self, flexible: bool = True) -> Layout:
"""
Handle output layout generation for matrix multiplication.
Args:
out_dtype: Optional output dtype. If not provided, infer from inputs
flexible: If True, return FlexibleLayout, otherwise FixedLayout
"""
mat1, mat2 = self.mat1mat2()
out_dtype = self.out_dtype()
# NOTE: taken from mm_common.mm_args
*b1, m, k1 = mat1.get_size()
*b2, k2, n = mat2.get_size()
b = [V.graph.sizevars.check_equals_and_simplify(a, b) for a, b in zip(b1, b2)]
size = [*b, m, n]
if flexible:
return FlexibleLayout(self.device(), out_dtype, size)
else:
return FixedLayout(self.device(), out_dtype, size)
def mat1mat2(self) -> tuple[Any, Any]:
"""
Get the mat1 and mat2 nodes.
Returns:
A tuple of (mat1, mat2) nodes
"""
nodes = self.nodes()
return nodes[self._mat1_idx], nodes[self._mat2_idx]
def mnk_hinted(self) -> tuple[int, int, int]:
"""
Get the hinted M, N, K dimensions for matrix multiplication.
Handles both 2D (MM) and 3D (BMM) tensors.
Uses shapes_hinted from the base class to get integer hints for dimensions.
Returns:
A tuple of (M, N, K) dimensions as integers
"""
hinted_shapes = self.shapes_hinted()
mat1_shape = hinted_shapes[self._mat1_idx]
mat2_shape = hinted_shapes[self._mat2_idx]
m = mat1_shape[-2] # M from second-to-last dimension of mat1
k = mat1_shape[-1] # K from last dimension of mat1
n = mat2_shape[-1] # N from last dimension of mat2
# Ensure K dimensions match between operands
k_check = mat2_shape[-2] # K from second-to-last dimension of mat2
assert k == k_check, f"K dimensions don't match: {k} vs {k_check}"
return (m, n, k)
| MMKernelInputs |
python | kamyu104__LeetCode-Solutions | Python/find-the-k-th-lucky-number.py | {
"start": 49,
"end": 401
} | class ____(object):
def kthLuckyNumber(self, k):
"""
:type k: int
:rtype: str
"""
result = []
k += 1
while k != 1:
result.append('7' if k&1 else '4')
k >>= 1
result.reverse()
return "".join(result)
# Time: O(logn)
# Space: O(1)
# math, bitmasks
| Solution |
python | wandb__wandb | wandb/sdk/data_types/image.py | {
"start": 30478,
"end": 36123
} | class ____(_dtypes.Type):
name = "image-file"
legacy_names = ["wandb.Image"]
types = [Image]
def __init__(
self,
box_layers=None,
box_score_keys=None,
mask_layers=None,
class_map=None,
**kwargs,
):
box_layers = box_layers or {}
box_score_keys = box_score_keys or []
mask_layers = mask_layers or {}
class_map = class_map or {}
if isinstance(box_layers, _dtypes.ConstType):
box_layers = box_layers._params["val"]
if not isinstance(box_layers, dict):
raise TypeError("box_layers must be a dict")
else:
box_layers = _dtypes.ConstType(
{layer_key: set(box_layers[layer_key]) for layer_key in box_layers}
)
if isinstance(mask_layers, _dtypes.ConstType):
mask_layers = mask_layers._params["val"]
if not isinstance(mask_layers, dict):
raise TypeError("mask_layers must be a dict")
else:
mask_layers = _dtypes.ConstType(
{layer_key: set(mask_layers[layer_key]) for layer_key in mask_layers}
)
if isinstance(box_score_keys, _dtypes.ConstType):
box_score_keys = box_score_keys._params["val"]
if not isinstance(box_score_keys, list) and not isinstance(box_score_keys, set):
raise TypeError("box_score_keys must be a list or a set")
else:
box_score_keys = _dtypes.ConstType(set(box_score_keys))
if isinstance(class_map, _dtypes.ConstType):
class_map = class_map._params["val"]
if not isinstance(class_map, dict):
raise TypeError("class_map must be a dict")
else:
class_map = _dtypes.ConstType(class_map)
self.params.update(
{
"box_layers": box_layers,
"box_score_keys": box_score_keys,
"mask_layers": mask_layers,
"class_map": class_map,
}
)
def assign_type(self, wb_type=None):
if isinstance(wb_type, _ImageFileType):
box_layers_self = self.params["box_layers"].params["val"] or {}
box_score_keys_self = self.params["box_score_keys"].params["val"] or []
mask_layers_self = self.params["mask_layers"].params["val"] or {}
class_map_self = self.params["class_map"].params["val"] or {}
box_layers_other = wb_type.params["box_layers"].params["val"] or {}
box_score_keys_other = wb_type.params["box_score_keys"].params["val"] or []
mask_layers_other = wb_type.params["mask_layers"].params["val"] or {}
class_map_other = wb_type.params["class_map"].params["val"] or {}
# Merge the class_ids from each set of box_layers
box_layers = {
str(key): set(
list(box_layers_self.get(key, []))
+ list(box_layers_other.get(key, []))
)
for key in set(
list(box_layers_self.keys()) + list(box_layers_other.keys())
)
}
# Merge the class_ids from each set of mask_layers
mask_layers = {
str(key): set(
list(mask_layers_self.get(key, []))
+ list(mask_layers_other.get(key, []))
)
for key in set(
list(mask_layers_self.keys()) + list(mask_layers_other.keys())
)
}
# Merge the box score keys
box_score_keys = set(list(box_score_keys_self) + list(box_score_keys_other))
# Merge the class_map
class_map = {
str(key): class_map_self.get(key, class_map_other.get(key, None))
for key in set(
list(class_map_self.keys()) + list(class_map_other.keys())
)
}
return _ImageFileType(box_layers, box_score_keys, mask_layers, class_map)
return _dtypes.InvalidType()
@classmethod
def from_obj(cls, py_obj):
if not isinstance(py_obj, Image):
raise TypeError("py_obj must be a wandb.Image")
else:
if hasattr(py_obj, "_boxes") and py_obj._boxes:
box_layers = {
str(key): set(py_obj._boxes[key]._class_labels.keys())
for key in py_obj._boxes.keys()
}
box_score_keys = {
key
for val in py_obj._boxes.values()
for box in val._val
for key in box.get("scores", {}).keys()
}
else:
box_layers = {}
box_score_keys = set()
if hasattr(py_obj, "_masks") and py_obj._masks:
mask_layers = {
str(key): set(
py_obj._masks[key]._val["class_labels"].keys()
if hasattr(py_obj._masks[key], "_val")
else []
)
for key in py_obj._masks.keys()
}
else:
mask_layers = {}
if hasattr(py_obj, "_classes") and py_obj._classes:
class_set = {
str(item["id"]): item["name"] for item in py_obj._classes._class_set
}
else:
class_set = {}
return cls(box_layers, box_score_keys, mask_layers, class_set)
_dtypes.TypeRegistry.add(_ImageFileType)
| _ImageFileType |
python | neetcode-gh__leetcode | python/1930-unique-length-3-palindromic-subsequences.py | {
"start": 0,
"end": 261
} | class ____:
def countPalindromicSubsequence(self, s: str) -> int:
count = 0
chars = set(s)
for char in chars:
first,last = s.find(char),s.rfind(char)
count += len(set(s[first+1:last]))
return count | Solution |
python | apache__airflow | providers/apache/beam/tests/unit/apache/beam/operators/test_beam.py | {
"start": 24403,
"end": 40872
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, default_options, pipeline_options):
self.default_op_kwargs = {
"task_id": TASK_ID,
"default_pipeline_options": copy.deepcopy(default_options),
"pipeline_options": copy.deepcopy(pipeline_options),
}
def test_init(self, default_options, pipeline_options):
"""Test BeamRunGoPipelineOperator instance is properly initialized with go_file."""
op = BeamRunGoPipelineOperator(**self.default_op_kwargs, go_file=GO_FILE, dataflow_config={})
assert op.task_id == TASK_ID
assert op.go_file == GO_FILE
assert op.launcher_binary == ""
assert op.worker_binary == ""
assert op.runner == DEFAULT_RUNNER
assert op.default_pipeline_options == default_options
assert op.pipeline_options == pipeline_options
assert op.dataflow_config == {}
def test_init_with_launcher_binary(self, default_options, pipeline_options):
"""Test BeamRunGoPipelineOperator instance is properly initialized with launcher_binary."""
op = BeamRunGoPipelineOperator(
**self.default_op_kwargs, launcher_binary=LAUNCHER_BINARY, dataflow_config={}
)
assert op.task_id == TASK_ID
assert op.go_file == ""
assert op.launcher_binary == LAUNCHER_BINARY
assert op.worker_binary == LAUNCHER_BINARY
assert op.runner == DEFAULT_RUNNER
assert op.default_pipeline_options == default_options
assert op.pipeline_options == pipeline_options
assert op.dataflow_config == {}
def test_init_with_launcher_binary_and_worker_binary(self, default_options, pipeline_options):
"""
Test BeamRunGoPipelineOperator instance is properly initialized with launcher_binary and
worker_binary.
"""
op = BeamRunGoPipelineOperator(
**self.default_op_kwargs,
launcher_binary=LAUNCHER_BINARY,
worker_binary=WORKER_BINARY,
dataflow_config={},
)
assert op.task_id == TASK_ID
assert op.go_file == ""
assert op.launcher_binary == LAUNCHER_BINARY
assert op.worker_binary == WORKER_BINARY
assert op.runner == DEFAULT_RUNNER
assert op.default_pipeline_options == default_options
assert op.pipeline_options == pipeline_options
assert op.dataflow_config == {}
@pytest.mark.parametrize(
("launcher_binary", "go_file"),
[
pytest.param("", "", id="both-empty"),
pytest.param(None, None, id="both-not-set"),
pytest.param(LAUNCHER_BINARY, GO_FILE, id="both-set"),
],
)
def test_init_with_neither_go_file_nor_launcher_binary_raises(self, launcher_binary, go_file):
"""
Test BeamRunGoPipelineOperator initialization raises ValueError when neither
go_file nor launcher_binary is provided.
"""
op = BeamRunGoPipelineOperator(
**self.default_op_kwargs, launcher_binary=launcher_binary, go_file=go_file
)
with pytest.raises(ValueError, match="Exactly one of `go_file` and `launcher_binary` must be set"):
op.execute({})
@mock.patch(
"tempfile.TemporaryDirectory",
return_value=MagicMock(__enter__=MagicMock(return_value="/tmp/apache-beam-go")),
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_direct_runner_with_gcs_go_file(self, gcs_hook, beam_hook_mock, _):
"""Test BeamHook is created and the right args are passed to
start_go_workflow.
"""
start_go_pipeline_method = beam_hook_mock.return_value.start_go_pipeline
gcs_download_method = gcs_hook.return_value.download
op = BeamRunGoPipelineOperator(**self.default_op_kwargs, go_file=GO_FILE)
op.execute({})
beam_hook_mock.assert_called_once_with(runner=DEFAULT_RUNNER)
expected_options = {
"project": "test",
"staging_location": "gs://test/staging",
"output": "gs://test/output",
"labels": {"foo": "bar", "airflow-version": TEST_VERSION},
}
expected_go_file = "/tmp/apache-beam-go/main.go"
gcs_download_method.assert_called_once_with(
bucket_name="my-bucket", object_name="example/main.go", filename=expected_go_file
)
start_go_pipeline_method.assert_called_once_with(
variables=expected_options,
go_file=expected_go_file,
process_line_callback=None,
should_init_module=True,
)
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch("tempfile.TemporaryDirectory")
def test_exec_direct_runner_with_gcs_launcher_binary(
self, mock_tmp_dir, mock_beam_hook, mock_gcs_hook, tmp_path
):
"""
Test start_go_pipeline_from_binary is called with an executable launcher binary downloaded from GCS.
"""
def tmp_dir_side_effect(prefix: str) -> str:
sub_dir = tmp_path / mock_tmp_dir.call_args.kwargs["prefix"]
sub_dir.mkdir()
return str(sub_dir)
mock_tmp_dir.return_value.__enter__.side_effect = tmp_dir_side_effect
def gcs_download_side_effect(bucket_name: str, object_name: str, filename: str) -> None:
open(filename, "wb").close()
gcs_download_method = mock_gcs_hook.return_value.download
gcs_download_method.side_effect = gcs_download_side_effect
start_go_pipeline_method = mock_beam_hook.return_value.start_go_pipeline_with_binary
op = BeamRunGoPipelineOperator(**self.default_op_kwargs, launcher_binary="gs://bucket/path/to/main")
op.execute({})
expected_binary = f"{tmp_path}/apache-beam-go/launcher-main"
expected_options = {
"project": "test",
"staging_location": "gs://test/staging",
"output": "gs://test/output",
"labels": {"foo": "bar", "airflow-version": TEST_VERSION},
}
mock_beam_hook.assert_called_once_with(runner=DEFAULT_RUNNER)
mock_tmp_dir.assert_called_once_with(prefix="apache-beam-go")
gcs_download_method.assert_called_once_with(
bucket_name="bucket",
object_name="path/to/main",
filename=expected_binary,
)
assert os.access(expected_binary, os.X_OK)
start_go_pipeline_method.assert_called_once_with(
variables=expected_options,
launcher_binary=expected_binary,
worker_binary=expected_binary,
process_line_callback=None,
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch("airflow.providers.google.go_module_utils.init_module")
def test_exec_direct_runner_with_local_go_file(self, init_module, beam_hook_mock):
"""
Check that start_go_pipeline is called without initializing the Go module when source is locale.
"""
local_go_file_path = "/tmp/file/path/example.go"
operator = BeamRunGoPipelineOperator(
task_id=TASK_ID,
go_file=local_go_file_path,
)
start_go_pipeline_method = beam_hook_mock.return_value.start_go_pipeline
operator.execute({})
beam_hook_mock.assert_called_once_with(runner=DEFAULT_RUNNER)
init_module.assert_not_called()
start_go_pipeline_method.assert_called_once_with(
variables={"labels": {"airflow-version": TEST_VERSION}},
go_file=local_go_file_path,
process_line_callback=None,
should_init_module=False,
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
def test_exec_direct_runner_with_local_launcher_binary(self, mock_beam_hook):
"""
Test start_go_pipeline_with_binary is called with a local launcher binary.
"""
start_go_pipeline_method = mock_beam_hook.return_value.start_go_pipeline_with_binary
operator = BeamRunGoPipelineOperator(
task_id=TASK_ID,
launcher_binary="/local/path/to/main",
)
operator.execute({})
expected_binary = "/local/path/to/main"
mock_beam_hook.assert_called_once_with(runner=DEFAULT_RUNNER)
start_go_pipeline_method.assert_called_once_with(
variables={"labels": {"airflow-version": TEST_VERSION}},
launcher_binary=expected_binary,
worker_binary=expected_binary,
process_line_callback=None,
)
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(
"tempfile.TemporaryDirectory",
return_value=MagicMock(__enter__=MagicMock(return_value="/tmp/apache-beam-go")),
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_exec_dataflow_runner_with_go_file(
self, gcs_hook, dataflow_hook_mock, beam_hook_mock, _, persist_link_mock
):
"""Test DataflowHook is created and the right args are passed to
start_go_dataflow.
"""
gcs_download_method = gcs_hook.return_value.download
dataflow_config = DataflowConfiguration(impersonation_chain="test@impersonation.com")
op = BeamRunGoPipelineOperator(
runner="DataflowRunner",
dataflow_config=dataflow_config,
go_file=GO_FILE,
**self.default_op_kwargs,
)
op.execute({})
job_name = dataflow_hook_mock.build_dataflow_job_name.return_value
dataflow_hook_mock.assert_called_once_with(
gcp_conn_id=dataflow_config.gcp_conn_id,
poll_sleep=dataflow_config.poll_sleep,
impersonation_chain=dataflow_config.impersonation_chain,
drain_pipeline=dataflow_config.drain_pipeline,
cancel_timeout=dataflow_config.cancel_timeout,
wait_until_finished=dataflow_config.wait_until_finished,
)
expected_options = {
"project": dataflow_hook_mock.return_value.project_id,
"job_name": job_name,
"staging_location": "gs://test/staging",
"output": "gs://test/output",
"labels": {"foo": "bar", "airflow-version": TEST_VERSION},
"region": "us-central1",
}
persist_link_mock.assert_called_once_with(context={})
expected_go_file = "/tmp/apache-beam-go/main.go"
gcs_download_method.assert_called_once_with(
bucket_name="my-bucket", object_name="example/main.go", filename=expected_go_file
)
beam_hook_mock.return_value.start_go_pipeline.assert_called_once_with(
variables=expected_options,
go_file=expected_go_file,
process_line_callback=mock.ANY,
should_init_module=True,
)
dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
job_id=op.dataflow_job_id,
job_name=job_name,
location="us-central1",
multiple_jobs=False,
project_id=dataflow_config.project_id,
)
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("tempfile.TemporaryDirectory"))
def test_exec_dataflow_runner_with_launcher_binary_and_worker_binary(
self, mock_tmp_dir, mock_beam_hook, mock_gcs_hook, mock_dataflow_hook, mock_persist_link, tmp_path
):
"""
Test DataflowHook is created and start_go_pipeline_from_binary is called with
a launcher binary and a worker binary.
"""
def tmp_dir_side_effect(prefix: str) -> str:
sub_dir = tmp_path / mock_tmp_dir.call_args.kwargs["prefix"]
sub_dir.mkdir()
return str(sub_dir)
mock_tmp_dir.return_value.__enter__.side_effect = tmp_dir_side_effect
def gcs_download_side_effect(bucket_name: str, object_name: str, filename: str) -> None:
open(filename, "wb").close()
gcs_download_method = mock_gcs_hook.return_value.download
gcs_download_method.side_effect = gcs_download_side_effect
mock_dataflow_hook.build_dataflow_job_name.return_value = "test-job"
start_go_pipeline_method = mock_beam_hook.return_value.start_go_pipeline_with_binary
wait_for_done_method = mock_dataflow_hook.return_value.wait_for_done
dataflow_config = DataflowConfiguration(project_id="test-project")
operator = BeamRunGoPipelineOperator(
launcher_binary="gs://bucket/path/to/main1",
worker_binary="gs://bucket/path/to/main2",
runner="DataflowRunner",
dataflow_config=dataflow_config,
**self.default_op_kwargs,
)
operator.execute({})
expected_launcher_binary = str(tmp_path / "apache-beam-go/launcher-main1")
expected_worker_binary = str(tmp_path / "apache-beam-go/worker-main2")
expected_job_name = "test-job"
expected_options = {
"project": "test-project",
"job_name": expected_job_name,
"staging_location": "gs://test/staging",
"output": "gs://test/output",
"labels": {"foo": "bar", "airflow-version": TEST_VERSION},
"region": "us-central1",
}
mock_tmp_dir.assert_called_once_with(prefix="apache-beam-go")
gcs_download_method.assert_has_calls(
[
call(bucket_name="bucket", object_name="path/to/main1", filename=expected_launcher_binary),
call(bucket_name="bucket", object_name="path/to/main2", filename=expected_worker_binary),
],
)
assert os.access(expected_launcher_binary, os.X_OK)
assert os.access(expected_worker_binary, os.X_OK)
mock_dataflow_hook.assert_called_once_with(
gcp_conn_id=dataflow_config.gcp_conn_id,
poll_sleep=dataflow_config.poll_sleep,
impersonation_chain=dataflow_config.impersonation_chain,
drain_pipeline=dataflow_config.drain_pipeline,
cancel_timeout=dataflow_config.cancel_timeout,
wait_until_finished=dataflow_config.wait_until_finished,
)
start_go_pipeline_method.assert_called_once_with(
variables=expected_options,
launcher_binary=expected_launcher_binary,
worker_binary=expected_worker_binary,
process_line_callback=mock.ANY,
)
mock_persist_link.assert_called_once_with(context={})
wait_for_done_method.assert_called_once_with(
job_name=expected_job_name,
location=dataflow_config.location,
job_id=operator.dataflow_job_id,
multiple_jobs=False,
project_id=dataflow_config.project_id,
)
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowJobLink.persist"))
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
def test_on_kill_dataflow_runner(self, dataflow_hook_mock, _, __, ___):
dataflow_cancel_job = dataflow_hook_mock.return_value.cancel_job
op = BeamRunGoPipelineOperator(**self.default_op_kwargs, go_file=GO_FILE, runner="DataflowRunner")
op.execute({})
op.dataflow_job_id = JOB_ID
op.on_kill()
dataflow_cancel_job.assert_called_once_with(
job_id=JOB_ID, project_id=op.dataflow_config.project_id, location=op.dataflow_config.location
)
@mock.patch(BEAM_OPERATOR_PATH.format("BeamHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("DataflowHook"))
@mock.patch(BEAM_OPERATOR_PATH.format("GCSHook"))
def test_on_kill_direct_runner(self, _, dataflow_mock, __):
dataflow_cancel_job = dataflow_mock.return_value.cancel_job
op = BeamRunGoPipelineOperator(**self.default_op_kwargs, go_file=GO_FILE)
op.execute({})
op.on_kill()
dataflow_cancel_job.assert_not_called()
| TestBeamRunGoPipelineOperator |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 8050,
"end": 9161
} | class ____:
params = [
("datetime", "int", "string", "ea_int"),
]
param_names = ["dtype"]
def setup(self, dtype):
N = 10**4 * 2
level1 = range(1000)
level2 = date_range(start="1/1/2000", periods=N // 1000)
dates_left = MultiIndex.from_product([level1, level2])
level2 = range(N // 1000)
int_left = MultiIndex.from_product([level1, level2])
level2 = Series(range(N // 1000), dtype="Int64")
level2[0] = NA
ea_int_left = MultiIndex.from_product([level1, level2])
level2 = Index([f"i-{i}" for i in range(N // 1000)], dtype=object).values
str_left = MultiIndex.from_product([level1, level2])
data = {
"datetime": dates_left,
"int": int_left,
"ea_int": ea_int_left,
"string": str_left,
}
data = {k: {"left": mi, "right": mi[:5]} for k, mi in data.items()}
self.left = data[dtype]["left"]
self.right = data[dtype]["right"]
def time_difference(self, dtype):
self.left.difference(self.right)
| Difference |
python | great-expectations__great_expectations | versioneer.py | {
"start": 18888,
"end": 19409
} | class ____:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
| VersioneerConfig |
python | redis__redis-py | redis/commands/search/aggregation.py | {
"start": 131,
"end": 417
} | class ____:
def __init__(self, offset: int = 0, count: int = 0) -> None:
self.offset = offset
self.count = count
def build_args(self):
if self.count:
return ["LIMIT", str(self.offset), str(self.count)]
else:
return []
| Limit |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 35900,
"end": 36023
} | class ____(BaseModel, extra="forbid"):
exp_decay: "DecayParamsExpression" = Field(..., description="")
| ExpDecayExpression |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 2617,
"end": 2731
} | class ____(
_fixtures.FixtureTest, testing.AssertsExecutionResults, AssertsUOW
):
run_inserts = None
| UOWTest |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 2237,
"end": 4450
} | class ____(NamedTuple):
"""Data class that contains configuration on what a clingo solve should output."""
#: Print out coarse timers for different solve phases
timers: bool
#: Whether to output Clingo's internal solver statistics
stats: bool
#: Optional output stream for the generated ASP program
out: Optional[io.IOBase]
#: If True, stop after setup and don't solve
setup_only: bool
#: Default output configuration for a solve
DEFAULT_OUTPUT_CONFIGURATION = OutputConfiguration(
timers=False, stats=False, out=None, setup_only=False
)
def default_clingo_control():
"""Return a control object with the default settings used in Spack"""
control = clingo().Control()
control.configuration.configuration = "tweety"
control.configuration.solver.heuristic = "Domain"
control.configuration.solver.opt_strategy = "usc"
return control
@contextmanager
def named_spec(
spec: Optional[spack.spec.Spec], name: Optional[str]
) -> Iterator[Optional[spack.spec.Spec]]:
"""Context manager to temporarily set the name of a spec"""
if spec is None or name is None:
yield spec
return
old_name = spec.name
spec.name = name
try:
yield spec
finally:
spec.name = old_name
# Below numbers are used to map names of criteria to the order
# they appear in the solution. See concretize.lp
# The space of possible priorities for optimization targets
# is partitioned in the following ranges:
#
# [0-100) Optimization criteria for software being reused
# [100-200) Fixed criteria that are higher priority than reuse, but lower than build
# [200-300) Optimization criteria for software being built
# [300-1000) High-priority fixed criteria
# [1000-inf) Error conditions
#
# Each optimization target is a minimization with optimal value 0.
#: High fixed priority offset for criteria that supersede all build criteria
high_fixed_priority_offset = 300
#: Priority offset for "build" criteria (regular criterio shifted to
#: higher priority for specs we have to build)
build_priority_offset = 200
#: Priority offset of "fixed" criteria (those w/o build criteria)
fixed_priority_offset = 100
| OutputConfiguration |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional_recurrent.py | {
"start": 1525,
"end": 17788
} | class ____(RNN):
"""Base class for convolutional-recurrent layers.
Args:
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell
output). This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
return_sequences: Boolean. Whether to return the last output.
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)`
if data_format='channels_first' or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)`
if data_format='channels_last'.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- If `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
- Else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
Masking:
This layer supports masking for input data with a variable number
of timesteps.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
- If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- If functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers,
e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified
too.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
raise TypeError('It is not possible at the moment to'
'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
self.states = None
self._num_constants = None
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=E1130
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec],
self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = backend.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = backend.sum(initial_state, axis=1)
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
array_ops.zeros(tuple(shape),
initial_state.dtype),
padding=self.cell.padding)
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants)
if isinstance(mask, list):
mask = mask[0]
timesteps = backend.int_shape(inputs)[1]
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
return self.cell.call(inputs, states, constants=constants, **kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = backend.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps)
if self.stateful:
updates = [
backend.update(self_state, state)
for self_state, state in zip(self.states, states)
]
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1].concatenate(state_shape[2:])
if None in state_shape:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the time dimension by passing a '
'`batch_shape` argument to your Input layer.\n'
'The same thing goes for the number of rows and '
'columns.')
# helper function
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == 'channels_first':
result[1] = nb_channels
elif self.cell.data_format == 'channels_last':
result[3] = nb_channels
else:
raise KeyError
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [backend.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size]
else:
self.states = [backend.zeros(get_tuple_shape(self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
backend.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
backend.set_value(self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, ' +
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str(get_tuple_shape(dim)) +
', found shape=' + str(value.shape))
# TODO(anjalisridhar): consider batch calls to `set_value`.
backend.set_value(state, value)
| ConvRNN2D |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 13688,
"end": 13827
} | class ____(SecurityBase):
type_: SecuritySchemeType = Field(default=SecuritySchemeType.oauth2, alias="type")
flows: OAuthFlows
| OAuth2 |
python | spyder-ide__spyder | spyder/utils/stylesheet.py | {
"start": 13995,
"end": 15610
} | class ____(SpyderStyleSheet):
"""Base style for tabbars."""
OBJECT_NAME = ''
# Additional border for scroll buttons
SCROLL_BUTTONS_BORDER_WIDTH = '0px'
# Position for the scroll buttons additional border
SCROLL_BUTTONS_BORDER_POS = ''
def set_stylesheet(self):
css = self.get_stylesheet()
buttons_color = SpyderPalette.COLOR_BACKGROUND_1
# Set style for scroll buttons
css[f'QTabBar{self.OBJECT_NAME} QToolButton'].setValues(
background=buttons_color,
borderRadius='0px',
)
if self.SCROLL_BUTTONS_BORDER_POS == 'right':
css[f'QTabBar{self.OBJECT_NAME} QToolButton'].setValues(
borderRight=(
f'{self.SCROLL_BUTTONS_BORDER_WIDTH} solid {buttons_color}'
)
)
else:
css[f'QTabBar{self.OBJECT_NAME} QToolButton'].setValues(
borderBottom=(
f'{self.SCROLL_BUTTONS_BORDER_WIDTH} solid {buttons_color}'
)
)
# Hover and pressed state for scroll buttons
for state in ['hover', 'pressed', 'checked', 'checked:hover']:
if state == 'hover':
color = SpyderPalette.COLOR_BACKGROUND_2
else:
color = SpyderPalette.COLOR_BACKGROUND_3
css[f'QTabBar{self.OBJECT_NAME} QToolButton:{state}'].setValues(
background=color
)
# Set width for scroll buttons
css['QTabBar::scroller'].setValues(
width='66px',
)
| BaseTabBarStyleSheet |
python | python-attrs__attrs | tests/test_funcs.py | {
"start": 17524,
"end": 19391
} | class ____:
"""
Tests for `assoc`.
"""
@given(slots=st.booleans(), frozen=st.booleans())
def test_empty(self, slots, frozen):
"""
Empty classes without changes get copied.
"""
@attr.s(slots=slots, frozen=frozen)
class C:
pass
i1 = C()
i2 = assoc(i1)
assert i1 is not i2
assert i1 == i2
@given(simple_classes())
def test_no_changes(self, C):
"""
No changes means a verbatim copy.
"""
i1 = C()
i2 = assoc(i1)
assert i1 is not i2
assert i1 == i2
@given(simple_classes(), st.data())
def test_change(self, C, data):
"""
Changes work.
"""
# Take the first attribute, and change it.
assume(fields(C)) # Skip classes with no attributes.
field_names = [a.name for a in fields(C)]
original = C()
chosen_names = data.draw(st.sets(st.sampled_from(field_names)))
change_dict = {name: data.draw(st.integers()) for name in chosen_names}
changed = assoc(original, **change_dict)
for k, v in change_dict.items():
assert getattr(changed, k) == v
@given(simple_classes())
def test_unknown(self, C):
"""
Wanting to change an unknown attribute raises an
AttrsAttributeNotFoundError.
"""
# No generated class will have a four letter attribute.
with pytest.raises(AttrsAttributeNotFoundError) as e:
assoc(C(), aaaa=2)
assert (f"aaaa is not an attrs attribute on {C!r}.",) == e.value.args
def test_frozen(self):
"""
Works on frozen classes.
"""
@attr.s(frozen=True)
class C:
x = attr.ib()
y = attr.ib()
assert C(3, 2) == assoc(C(1, 2), x=3)
| TestAssoc |
python | huggingface__transformers | src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py | {
"start": 44392,
"end": 44469
} | class ____(DeepseekVLProcessorKwargs):
pass
| DeepseekVLHybridProcessorKwargs |
python | pytorch__pytorch | torch/_inductor/compile_fx_ext.py | {
"start": 7313,
"end": 8144
} | class ____:
"""
For _SerializedFxCompile - encapsulates all the data being transferred
(returned) back from the child to the parent.
"""
graph: OutputCode
metrics: CachedMetricsDeltas
logs: list[logging.LogRecord]
warning_replay: Optional[list[warnings.WarningMessage]]
shape_env: Optional[torch.fx.experimental.symbolic_shapes.ShapeEnv]
def serialize(self) -> _WireProtocolPickledOutput:
"""
Turns this object into a _WireProtocolPickledOutput which can be
directly transferred across a stream.
"""
from torch.fx._graph_pickler import GraphPickler
if isinstance(self.graph, CompiledFxGraph):
self.graph.prepare_for_serialization()
return _WireProtocolPickledOutput(GraphPickler.dumps(self))
@dataclass
| _WireProtocolOutput |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 12689,
"end": 17053
} | class ____(django_test.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
class PointedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.PointedModel
foo = 'foo'
class PointerFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.PointerModel
bar = 'bar'
pointed = factory.SubFactory(PointedFactory, foo='new_foo')
class PointedRelatedFactory(PointedFactory):
pointer = factory.RelatedFactory(
PointerFactory,
factory_related_name='pointed',
)
class Meta:
skip_postgeneration_save = True
class PointerExtraFactory(PointerFactory):
pointed__foo = 'extra_new_foo'
class PointedRelatedExtraFactory(PointedRelatedFactory):
pointer__bar = 'extra_new_bar'
class PointedRelatedWithTraitFactory(PointedFactory):
class Params:
with_pointer = factory.Trait(
pointer=factory.RelatedFactory(
PointerFactory,
factory_related_name='pointed',
bar='with_trait',
)
)
class Meta:
skip_postgeneration_save = True
cls.PointedFactory = PointedFactory
cls.PointerFactory = PointerFactory
cls.PointedRelatedFactory = PointedRelatedFactory
cls.PointerExtraFactory = PointerExtraFactory
cls.PointedRelatedExtraFactory = PointedRelatedExtraFactory
cls.PointedRelatedWithTraitFactory = PointedRelatedWithTraitFactory
def test_create_pointed(self):
pointed = self.PointedFactory()
self.assertEqual(pointed, models.PointedModel.objects.get())
self.assertEqual(pointed.foo, 'foo')
def test_create_pointer(self):
pointer = self.PointerFactory()
self.assertEqual(pointer.pointed, models.PointedModel.objects.get())
self.assertEqual(pointer.pointed.foo, 'new_foo')
def test_create_pointer_with_deep_context(self):
pointer = self.PointerFactory(pointed__foo='new_new_foo')
self.assertEqual(pointer, models.PointerModel.objects.get())
self.assertEqual(pointer.bar, 'bar')
self.assertEqual(pointer.pointed, models.PointedModel.objects.get())
self.assertEqual(pointer.pointed.foo, 'new_new_foo')
def test_create_pointed_related(self):
pointed = self.PointedRelatedFactory()
self.assertEqual(pointed, models.PointedModel.objects.get())
self.assertEqual(pointed.foo, 'foo')
self.assertEqual(pointed.pointer, models.PointerModel.objects.get())
self.assertEqual(pointed.pointer.bar, 'bar')
def test_create_pointed_related_with_deep_context(self):
pointed = self.PointedRelatedFactory(pointer__bar='new_new_bar')
self.assertEqual(pointed, models.PointedModel.objects.get())
self.assertEqual(pointed.foo, 'foo')
self.assertEqual(pointed.pointer, models.PointerModel.objects.get())
self.assertEqual(pointed.pointer.bar, 'new_new_bar')
def test_create_pointer_extra(self):
pointer = self.PointerExtraFactory()
self.assertEqual(pointer, models.PointerModel.objects.get())
self.assertEqual(pointer.bar, 'bar')
self.assertEqual(pointer.pointed, models.PointedModel.objects.get())
self.assertEqual(pointer.pointed.foo, 'extra_new_foo')
def test_create_pointed_related_extra(self):
pointed = self.PointedRelatedExtraFactory()
self.assertEqual(pointed, models.PointedModel.objects.get())
self.assertEqual(pointed.foo, 'foo')
self.assertEqual(pointed.pointer, models.PointerModel.objects.get())
self.assertEqual(pointed.pointer.bar, 'extra_new_bar')
def test_create_pointed_related_with_trait(self):
pointed = self.PointedRelatedWithTraitFactory(
with_pointer=True
)
self.assertEqual(pointed, models.PointedModel.objects.get())
self.assertEqual(pointed.foo, 'foo')
self.assertEqual(pointed.pointer, models.PointerModel.objects.get())
self.assertEqual(pointed.pointer.bar, 'with_trait')
| DjangoRelatedFieldTestCase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-yandex-metrica/source_yandex_metrica/streams.py | {
"start": 9227,
"end": 9323
} | class ____(IncrementalYandexMetricaStream):
primary_key = "watchID"
_source = "hits"
| Views |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/io_ops/checkpoint_ops_test.py | {
"start": 13091,
"end": 19656
} | class ____(test.TestCase):
"""Tests for the load_and_remap_matrix() op.
(Specifically focused on the max_rows_in_memory arg and its effects on
TensorBundle's BundleReader and TensorSlice logic).
"""
def _test_loading_variable_with_max_rows(self, np_value, partitioner,
max_rows_in_memory):
"""Helper function for various tests using max_rows_in_memory."""
ops.reset_default_graph()
old_tensor_name = 'matrix_to_load_and_remap'
matrix = variable_scope.get_variable(
old_tensor_name,
dtype=dtypes.float32,
initializer=constant_op.constant(np_value, dtype=dtypes.float32),
partitioner=partitioner)
with self.cached_session() as sess:
ckpt_path = os.path.join(test.get_temp_dir(), 'temp_ckpt')
save = saver.Saver([matrix])
self.evaluate(variables.global_variables_initializer())
save.save(sess, ckpt_path)
num_rows, num_cols = np_value.shape
# Tests loading the entire tensor (except reversed).
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Simply reverses the rows of the matrix.
row_remapping=list(range(num_rows - 1, -1, -1)),
col_remapping=[],
initializing_values=[],
num_rows=num_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(np_value[::-1], self.evaluate(remapped_matrix))
# Tests loading the tensor (except for the first and last rows), with
# uninitialized values. Requires num_rows to be at least 3 since we're
# skipping the first and last rows.
self.assertGreater(num_rows, 2)
prefix_rows = 2
suffix_rows = 3
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Reverses the rows of the matrix, then prepends and appends
# uninitialized rows.
row_remapping=([-1] * prefix_rows + list(range(1, num_rows - 1)) +
[-1] * suffix_rows),
col_remapping=[],
initializing_values=[42] * (prefix_rows + suffix_rows) * num_cols,
num_rows=num_rows - 2 + prefix_rows + suffix_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(
np.vstack([
np.tile(42, [prefix_rows, num_cols]), np_value[1:-1],
np.tile(42, [suffix_rows, num_cols])
]), self.evaluate(remapped_matrix))
# Tests when everything is taken from initializing_values.
new_rows = 7
initializing_values = [42] * new_rows * num_cols
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
# Nothing is loaded from the old tensor.
row_remapping=[-1] * new_rows,
col_remapping=[],
initializing_values=initializing_values,
num_rows=new_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(
np.reshape(initializing_values, (new_rows, num_cols)),
self.evaluate(remapped_matrix))
@test_util.run_deprecated_v1
def test_loading_rows_divisible_by_max_rows(self):
"""Tests loading normal var when rows are evenly divisible by max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 9 is evenly divisible by 3.
max_rows_in_memory=3)
@test_util.run_deprecated_v1
def test_loading_rows_not_divisible_by_max_rows(self):
"""Tests loading normal var when rows aren't divisible by max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 9 is not evenly divisible by 4.
max_rows_in_memory=4)
@test_util.run_deprecated_v1
def test_loading_rows_less_than_max_rows(self):
"""Tests loading normal var as a single slice.
(When the specified max_rows_in_memory is larger than the number of rows)
"""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=None,
# 10 > 9.
max_rows_in_memory=10)
@test_util.run_deprecated_v1
def test_loading_no_max_rows(self):
"""Tests loading normal var as a single slice with no valid max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 18)), (6, 3)),
partitioner=None,
max_rows_in_memory=-1)
@test_util.run_deprecated_v1
def test_loading_partitions_equals_max_rows(self):
"""Tests loading partitioned var sliced on partition boundary."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
# With a tensor of shape [9, 3] and 3 partitions, each partition has
# exactly 3 rows.
max_rows_in_memory=3)
@test_util.run_deprecated_v1
def test_loading_partitions_greater_than_max_rows(self):
"""Tests loading partitioned var with more slices than partitions."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
# Even though each partition has 3 rows, we'll only load the tensor one
# row at a time.
max_rows_in_memory=1)
@test_util.run_deprecated_v1
def test_loading_partitions_less_than_max_rows(self):
"""Tests loading partitioned var as a single slice.
(When the specified max_rows_in_memory is larger than the number of rows)
"""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
max_rows_in_memory=10)
@test_util.run_deprecated_v1
def test_loading_partitions_no_max_rows(self):
"""Tests loading partitioned var as single slice with no valid max_rows."""
self._test_loading_variable_with_max_rows(
np_value=np.reshape(list(range(0, 36)), (9, 4)),
partitioner=partitioned_variables.fixed_size_partitioner(3),
max_rows_in_memory=-1)
if __name__ == '__main__':
test.main()
| LoadAndRemapMatrixWithMaxRowsTest |
python | kamyu104__LeetCode-Solutions | Python/bulb-switcher-ii.py | {
"start": 29,
"end": 410
} | class ____(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if m == 0:
return 1
if n == 1:
return 2
if m == 1 and n == 2:
return 3
if m == 1 or n == 2:
return 4
if m == 2:
return 7
return 8
| Solution |
python | davidhalter__jedi | test/completion/pep0484_comments.py | {
"start": 764,
"end": 1753
} | class ____:
pass
from typing import List, Tuple
x = [] # type: List[Employee]
#? Employee()
x[1]
x, y, z = [], [], [] # type: List[int], List[int], List[str]
#? int()
y[2]
x, y, z = [], [], [] # type: (List[float], List[float], List[BB])
for zi in z:
#? BB()
zi
x = [
1,
2,
] # type: List[str]
#? str()
x[1]
for bar in foo(): # type: str
#? str()
bar
for bar, baz in foo(): # type: int, float
#? int()
bar
#? float()
baz
for bar, baz in foo():
# type: str, str
""" type hinting on next line should not work """
#?
bar
#?
baz
with foo(): # type: int
...
with foo() as f: # type: str
#? str()
f
with foo() as f:
# type: str
""" type hinting on next line should not work """
#?
f
aaa = some_extremely_long_function_name_that_doesnt_leave_room_for_hints() \
# type: float # We should be able to put hints on the next line with a \
#? float()
aaa
# Test instance methods
| Employee |
python | huggingface__transformers | src/transformers/models/stablelm/modeling_stablelm.py | {
"start": 7969,
"end": 10445
} | class ____(nn.Module):
def __init__(self, dim, num_heads, eps=1e-5, bias=False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.norms = nn.ModuleList([nn.LayerNorm(dim, eps=eps, bias=bias) for _ in range(self.num_heads)])
def forward(self, hidden_states: torch.Tensor):
# Split along the num_heads axis to get per-head inputs
# [batch_size, num_heads, seq_len, head_dim] -> [batch_size, 1, seq_len, head_dim] * num_heads
states_per_heads = torch.split(hidden_states, 1, dim=1)
# Normalize and merge the heads back together
return torch.cat([norm(hidden_states) for norm, hidden_states in zip(self.norms, states_per_heads)], dim=1)
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
# Copied from transformers.models.llama.modeling_llama.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| StableLmLayerNormPerHead |
python | python__mypy | mypy/errors.py | {
"start": 48473,
"end": 52334
} | class ____(Exception):
"""Exception raised when there is a compile error.
It can be a parse, semantic analysis, type check or other
compilation-related error.
CompileErrors raised from an errors object carry all of the
messages that have not been reported out by error streaming.
This is patched up by build.build to contain either all error
messages (if errors were streamed) or none (if they were not).
"""
messages: list[str]
use_stdout = False
# Can be set in case there was a module with a blocking error
module_with_blocker: str | None = None
def __init__(
self, messages: list[str], use_stdout: bool = False, module_with_blocker: str | None = None
) -> None:
super().__init__("\n".join(messages))
self.messages = messages
self.use_stdout = use_stdout
self.module_with_blocker = module_with_blocker
def remove_path_prefix(path: str, prefix: str | None) -> str:
"""If path starts with prefix, return copy of path with the prefix removed.
Otherwise, return path. If path is None, return None.
"""
if prefix is not None and path.startswith(prefix):
return path[len(prefix) :]
else:
return path
def report_internal_error(
err: Exception,
file: str | None,
line: int,
errors: Errors,
options: Options,
stdout: TextIO | None = None,
stderr: TextIO | None = None,
) -> NoReturn:
"""Report internal error and exit.
This optionally starts pdb or shows a traceback.
"""
stdout = stdout or sys.stdout
stderr = stderr or sys.stderr
# Dump out errors so far, they often provide a clue.
# But catch unexpected errors rendering them.
try:
for msg in errors.new_messages():
print(msg)
except Exception as e:
print("Failed to dump errors:", repr(e), file=stderr)
# Compute file:line prefix for official-looking error messages.
if file:
if line:
prefix = f"{file}:{line}: "
else:
prefix = f"{file}: "
else:
prefix = ""
# Print "INTERNAL ERROR" message.
print(
f"{prefix}error: INTERNAL ERROR --",
"Please try using mypy master on GitHub:\n"
"https://mypy.readthedocs.io/en/stable/common_issues.html"
"#using-a-development-mypy-build",
file=stderr,
)
if options.show_traceback:
print("Please report a bug at https://github.com/python/mypy/issues", file=stderr)
else:
print(
"If this issue continues with mypy master, "
"please report a bug at https://github.com/python/mypy/issues",
file=stderr,
)
print(f"version: {mypy_version}", file=stderr)
# If requested, drop into pdb. This overrides show_tb.
if options.pdb:
print("Dropping into pdb", file=stderr)
import pdb
pdb.post_mortem(sys.exc_info()[2])
# If requested, print traceback, else print note explaining how to get one.
if options.raise_exceptions:
raise err
if not options.show_traceback:
if not options.pdb:
print(
"{}: note: please use --show-traceback to print a traceback "
"when reporting a bug".format(prefix),
file=stderr,
)
else:
tb = traceback.extract_stack()[:-2]
tb2 = traceback.extract_tb(sys.exc_info()[2])
print("Traceback (most recent call last):")
for s in traceback.format_list(tb + tb2):
print(s.rstrip("\n"))
print(f"{type(err).__name__}: {err}", file=stdout)
print(f"{prefix}: note: use --pdb to drop into pdb", file=stderr)
# Exit. The caller has nothing more to say.
# We use exit code 2 to signal that this is no ordinary error.
raise SystemExit(2)
| CompileError |
python | numpy__numpy | numpy/polynomial/tests/test_laguerre.py | {
"start": 5919,
"end": 9699
} | class ____:
def test_lagint(self):
# check exceptions
assert_raises(TypeError, lag.lagint, [0], .5)
assert_raises(ValueError, lag.lagint, [0], -1)
assert_raises(ValueError, lag.lagint, [0], 1, [0, 0])
assert_raises(ValueError, lag.lagint, [0], lbnd=[0])
assert_raises(ValueError, lag.lagint, [0], scl=[0])
assert_raises(TypeError, lag.lagint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0] * (i - 2) + [1]
res = lag.lagint([0], m=i, k=k)
assert_almost_equal(res, [1, -1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [1 / scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i])
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(lag.lagval(-1, lagint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [2 / scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], scl=2)
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1)
res = lag.lagint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k])
res = lag.lagint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1)
res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], scl=2)
res = lag.lagint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_lagint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T
res = lag.lagint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c) for c in c2d])
res = lag.lagint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c, k=3) for c in c2d])
res = lag.lagint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
| TestIntegral |
python | django__django | django/db/backends/base/features.py | {
"start": 93,
"end": 17184
} | class ____:
# An optional tuple indicating the minimum supported database version.
minimum_database_version = None
gis_enabled = False
# Oracle can't group by LOB (large object) data types.
allows_group_by_lob = True
allows_group_by_selected_pks = False
allows_group_by_select_index = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend support self-reference subqueries in the DELETE
# statement?
delete_can_self_reference_subquery = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
# Does the backend supports specifying whether NULL values should be
# considered distinct in unique constraints?
supports_nulls_distinct_unique_constraints = False
# Does the backend support initially deferrable unique constraints?
supports_deferrable_unique_constraints = False
can_use_chunked_reads = True
can_return_columns_from_insert = False
can_return_rows_from_bulk_insert = False
can_return_rows_from_update = False
has_bulk_insert = True
uses_savepoints = True
can_release_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries_with_in = True
has_select_for_update = False
has_select_for_update_nowait = False
has_select_for_update_skip_locked = False
has_select_for_update_of = False
has_select_for_no_key_update = False
# Does the database's SELECT FOR UPDATE OF syntax require a column rather
# than a table?
select_for_update_of_column = False
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
# Does the backend ignore unnecessary ORDER BY clauses in subqueries?
ignores_unnecessary_order_by_in_subqueries = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver supports same type temporal data subtraction
# by returning the type used to store duration field?
supports_temporal_subtraction = False
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
supports_order_by_nulls_modifier = True
# Does the backend orders NULLS FIRST by default?
order_by_nulls_first = False
# The database's limit on the number of query parameters.
max_query_params = None
# Can an object have an autoincrement primary key of 0?
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Map fields which some backends may not be able to differentiate to the
# field it's introspected as.
introspected_field_types = {
"AutoField": "AutoField",
"BigAutoField": "BigAutoField",
"BigIntegerField": "BigIntegerField",
"BinaryField": "BinaryField",
"BooleanField": "BooleanField",
"CharField": "CharField",
"DurationField": "DurationField",
"GenericIPAddressField": "GenericIPAddressField",
"IntegerField": "IntegerField",
"PositiveBigIntegerField": "PositiveBigIntegerField",
"PositiveIntegerField": "PositiveIntegerField",
"PositiveSmallIntegerField": "PositiveSmallIntegerField",
"SmallAutoField": "SmallAutoField",
"SmallIntegerField": "SmallIntegerField",
"TimeField": "TimeField",
}
# Can the backend introspect the column order (ASC/DESC) for indexes?
supports_index_column_ordering = True
# Does the backend support introspection of materialized views?
can_introspect_materialized_views = False
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
schema_editor_uses_clientside_param_binding = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Can it create foreign key constraints inline when adding columns?
can_create_inline_fk = True
# Can an index be renamed?
can_rename_index = False
# Does it automatically index foreign keys?
indexes_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
supports_table_check_constraints = True
# Does the backend support introspection of CHECK constraints?
can_introspect_check_constraints = True
# Does the backend support 'pyformat' style:
# ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized
# ones?
requires_literal_defaults = False
# Does the backend support functions in defaults?
supports_expression_defaults = True
# Does the backend support the DEFAULT keyword in insert queries?
supports_default_keyword_in_insert = True
# Does the backend support the DEFAULT keyword in bulk insert queries?
supports_default_keyword_in_bulk_insert = True
# Does the backend require a connection reset after each material schema
# change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = False
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ""
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
# Does the backend support "select for update" queries with limit (and
# offset)?
supports_select_for_update_with_limit = True
# Does the backend ignore null expressions in GREATEST and LEAST queries
# unless every expression is null?
greatest_least_ignores_nulls = False
# Can the backend clone databases for parallel test execution?
# Defaults to False to allow third-party backends to opt-in.
can_clone_databases = False
# Does the backend consider table names with different casing to
# be equal?
ignores_table_name_case = False
# Place FOR UPDATE right after FROM clause. Used on MSSQL.
for_update_after_from = False
# Combinatorial flags
supports_select_union = True
supports_select_intersection = True
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
requires_compound_order_by_subquery = False
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?
supports_aggregate_filter_clause = False
# Does the database support ORDER BY in aggregate expressions?
supports_aggregate_order_by_clause = False
# Does the database backend support DISTINCT when using multiple arguments
# in an aggregate expression? For example, Sqlite treats the "delimiter"
# argument of STRING_AGG/GROUP_CONCAT as an extra argument and does not
# allow using a custom delimiter along with DISTINCT.
supports_aggregate_distinct_multiple_argument = True
# Does the database support SQL 2023 ANY_VALUE in GROUP BY?
supports_any_value = False
# Does the backend support indexing a TextField?
supports_index_on_text_field = True
# Does the backend support window expressions (expression OVER (...))?
supports_over_clause = False
supports_frame_range_fixed_distance = False
supports_frame_exclusion = False
only_supports_unbounded_with_preceding_and_following = False
# Does the backend support CAST with precision?
supports_cast_with_precision = True
# How many second decimals does the database return when casting a value to
# a type with time?
time_cast_precision = 6
# SQL to create a procedure for use by the Django test suite. The
# functionality of the procedure isn't important.
create_test_procedure_without_params_sql = None
create_test_procedure_with_int_param_sql = None
# Does the backend support keyword parameters for cursor.callproc()?
supports_callproc_kwargs = False
# What formats does the backend EXPLAIN syntax support?
supported_explain_formats = set()
# Does the backend support the default parameter in lead() and lag()?
supports_default_in_lead_lag = True
# Does the backend support ignoring constraint or uniqueness errors during
# INSERT?
supports_ignore_conflicts = True
# Does the backend support updating rows on constraint or uniqueness errors
# during INSERT?
supports_update_conflicts = False
supports_update_conflicts_with_target = False
# Does this backend require casting the results of CASE expressions used
# in UPDATE statements to ensure the expression has the correct type?
requires_casted_case_in_updates = False
# Does the backend support partial indexes (CREATE INDEX ... WHERE ...)?
supports_partial_indexes = True
supports_functions_in_partial_indexes = True
# Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)?
supports_covering_indexes = False
# Does the backend support indexes on expressions?
supports_expression_indexes = True
# Does the backend treat COLLATE as an indexed expression?
collate_as_index_expression = False
# Does the database allow more than one constraint or index on the same
# field(s)?
allows_multiple_constraints_on_same_fields = True
# Does the backend support boolean expressions in SELECT and GROUP BY
# clauses?
supports_boolean_expr_in_select_clause = True
# Does the backend support comparing boolean expressions in WHERE clauses?
# Eg: WHERE (price > 0) IS NOT NULL
supports_comparing_boolean_expr = True
# Does the backend support JSONField?
supports_json_field = True
# Can the backend introspect a JSONField?
can_introspect_json_field = True
# Does the backend support primitives in JSONField?
supports_primitives_in_json_field = True
# Is there a true datatype for JSON?
has_native_json_field = False
# Does the backend use PostgreSQL-style JSON operators like '->'?
has_json_operators = False
# Does the backend support __contains and __contained_by lookups for
# a JSONField?
supports_json_field_contains = True
# Does value__d__contains={'f': 'g'} (without a list around the dict) match
# {'d': [{'f': 'g'}]}?
json_key_contains_list_matching_requires_list = False
# Does the backend support JSONObject() database function?
has_json_object_function = True
# Does the backend support negative JSON array indexing?
supports_json_negative_indexing = True
# Does the backend support column collations?
supports_collation_on_charfield = True
supports_collation_on_textfield = True
# Does the backend support non-deterministic collations?
supports_non_deterministic_collations = True
# Does the backend support column and table comments?
supports_comments = False
# Does the backend support column comments in ADD COLUMN statements?
supports_comments_inline = False
# Does the backend support stored generated columns?
supports_stored_generated_columns = False
# Does the backend support virtual generated columns?
supports_virtual_generated_columns = False
# Does the backend support the logical XOR operator?
supports_logical_xor = False
# Set to (exception, message) if null characters in text are disallowed.
prohibits_null_characters_in_text_exception = None
# Does the backend support unlimited character columns?
supports_unlimited_charfield = False
# Does the backend support numeric columns with no precision?
supports_no_precision_decimalfield = False
# Does the backend support native tuple lookups (=, >, <, IN)?
supports_tuple_lookups = True
# Does the backend support native tuple gt(e), lt(e) comparisons against
# subqueries?
supports_tuple_comparison_against_subquery = True
# Does the backend support CASCADE, DEFAULT, NULL as delete options?
supports_on_delete_db_cascade = True
supports_on_delete_db_default = True
supports_on_delete_db_null = True
# Collation names for use by the Django test suite.
test_collations = {
"ci": None, # Case-insensitive.
"cs": None, # Case-sensitive.
"non_default": None, # Non-default.
"swedish_ci": None, # Swedish case-insensitive.
"virtual": None, # A collation that can be used for virtual columns.
}
# SQL template override for tests.aggregation.tests.NowUTC
test_now_utc_template = None
# SQL to create a model instance using the database defaults.
insert_test_table_with_defaults = None
# Does the Round() database function round to even?
rounds_to_even = False
# A set of dotted paths to tests in Django's test suite that are expected
# to fail on this database.
django_test_expected_failures = set()
# A map of reasons to sets of dotted paths to tests in Django's test suite
# that should be skipped for this database.
django_test_skips = {}
def __init__(self, connection):
self.connection = connection
def __del__(self):
del self.connection
@cached_property
def supports_explaining_query_execution(self):
"""Does this backend support explaining query execution?"""
return self.connection.ops.explain_prefix is not None
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute("CREATE TABLE ROLLBACK_TEST (X INT)")
self.connection.set_autocommit(False)
cursor.execute("INSERT INTO ROLLBACK_TEST (X) VALUES (8)")
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute("SELECT COUNT(X) FROM ROLLBACK_TEST")
(count,) = cursor.fetchone()
cursor.execute("DROP TABLE ROLLBACK_TEST")
return count == 0
def allows_group_by_selected_pks_on_model(self, model):
if not self.allows_group_by_selected_pks:
return False
return model._meta.managed
| BaseDatabaseFeatures |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 61842,
"end": 64832
} | class ____(Operation):
def __init__(
self,
num_classes=None,
axis=-1,
dtype=None,
sparse=False,
*,
name=None,
**kwargs,
):
if num_classes is None and "num_tokens" in kwargs:
num_classes = kwargs.pop("num_tokens")
if num_classes is None:
raise ValueError("Argument `num_classes` must be specified.")
super().__init__(name=name)
self.num_classes = num_classes
self.axis = axis
self.dtype = dtype or backend.floatx()
self.sparse = sparse
def call(self, inputs):
return backend.nn.multi_hot(
inputs,
num_classes=self.num_classes,
axis=self.axis,
dtype=self.dtype,
)
def compute_output_spec(self, inputs):
x_shape = list(getattr(inputs, "shape", []))
if self.axis == -1:
x_shape.append(self.num_classes)
elif self.axis >= 0 and self.axis < len(x_shape):
x_shape.insert(self.axis, self.num_classes)
else:
raise ValueError(
f"axis must be -1 or between [0, {len(inputs.shape)}), but "
f"received {self.axis}."
)
if len(x_shape) == 2:
x_shape = [x_shape[-1]]
else:
x_shape = [x_shape[0]] + x_shape[2:]
return KerasTensor(x_shape, dtype=inputs.dtype, sparse=self.sparse)
@keras_export(
[
"keras.ops.multi_hot",
"keras.ops.nn.multi_hot",
]
)
def multi_hot(
inputs, num_classes=None, axis=-1, dtype=None, sparse=False, **kwargs
):
"""Encodes integer labels as multi-hot vectors.
This function encodes integer labels as multi-hot vectors, where each label
is mapped to a binary value in the resulting vector.
Args:
inputs: Tensor of integer labels to be converted to multi-hot vectors.
num_classes: Integer, the total number of unique classes.
axis: (optional) Axis along which the multi-hot encoding should be
added. Defaults to `-1`, which corresponds to the last dimension.
dtype: (optional) The data type of the resulting tensor. Default
is backend's float type.
sparse: Whether to return a sparse tensor; for backends that support
sparse tensors.
Returns:
Tensor: The multi-hot encoded tensor.
Example:
>>> data = keras.ops.convert_to_tensor([0, 4])
>>> keras.ops.multi_hot(data, num_classes=5)
array([1.0, 0.0, 0.0, 0.0, 1.0], dtype=float32)
"""
if num_classes is None and "num_tokens" in kwargs:
num_classes = kwargs.pop("num_tokens")
if num_classes is None:
raise ValueError("Argument `num_classes` must be specified.")
if any_symbolic_tensors((inputs,)):
return MultiHot(num_classes, axis, dtype, sparse).symbolic_call(inputs)
return backend.nn.multi_hot(inputs, num_classes, axis, dtype, sparse)
| MultiHot |
python | pypa__pip | tests/unit/test_vcs.py | {
"start": 38342,
"end": 42492
} | class ____(TestCase):
def setUp(self) -> None:
patcher = mock.patch("pip._internal.vcs.versioncontrol.call_subprocess")
self.addCleanup(patcher.stop)
self.call_subprocess_mock = patcher.start()
# Test Data.
self.url = "svn+http://username:password@svn.example.com/"
# use_interactive is set to False to test that remote call options are
# properly added.
self.svn = Subversion(use_interactive=False)
self.rev_options = RevOptions(Subversion)
self.dest = "/tmp/test"
def assert_call_args(self, args: CommandArgs) -> None:
assert self.call_subprocess_mock.call_args[0][0] == args
def test_obtain(self) -> None:
self.svn.obtain(self.dest, hide_url(self.url), verbosity=1)
self.assert_call_args(
[
"svn",
"checkout",
"--non-interactive",
"--username",
"username",
"--password",
hide_value("password"),
hide_url("http://svn.example.com/"),
"/tmp/test",
]
)
def test_obtain_quiet(self) -> None:
self.svn.obtain(self.dest, hide_url(self.url), verbosity=0)
self.assert_call_args(
[
"svn",
"checkout",
"--quiet",
"--non-interactive",
"--username",
"username",
"--password",
hide_value("password"),
hide_url("http://svn.example.com/"),
"/tmp/test",
]
)
def test_fetch_new(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=1)
self.assert_call_args(
[
"svn",
"checkout",
"--non-interactive",
hide_url("svn+http://username:password@svn.example.com/"),
"/tmp/test",
]
)
def test_fetch_new_quiet(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=0)
self.assert_call_args(
[
"svn",
"checkout",
"--quiet",
"--non-interactive",
hide_url("svn+http://username:password@svn.example.com/"),
"/tmp/test",
]
)
def test_fetch_new_revision(self) -> None:
rev_options = RevOptions(Subversion, "123")
self.svn.fetch_new(self.dest, hide_url(self.url), rev_options, verbosity=1)
self.assert_call_args(
[
"svn",
"checkout",
"--non-interactive",
"-r",
"123",
hide_url("svn+http://username:password@svn.example.com/"),
"/tmp/test",
]
)
def test_fetch_new_revision_quiet(self) -> None:
rev_options = RevOptions(Subversion, "123")
self.svn.fetch_new(self.dest, hide_url(self.url), rev_options, verbosity=0)
self.assert_call_args(
[
"svn",
"checkout",
"--quiet",
"--non-interactive",
"-r",
"123",
hide_url("svn+http://username:password@svn.example.com/"),
"/tmp/test",
]
)
def test_switch(self) -> None:
self.svn.switch(self.dest, hide_url(self.url), self.rev_options)
self.assert_call_args(
[
"svn",
"switch",
"--non-interactive",
hide_url("svn+http://username:password@svn.example.com/"),
"/tmp/test",
]
)
def test_update(self) -> None:
self.svn.update(self.dest, hide_url(self.url), self.rev_options)
self.assert_call_args(
[
"svn",
"update",
"--non-interactive",
"/tmp/test",
]
)
| TestSubversionArgs |
python | huggingface__transformers | examples/modular-transformers/modeling_dummy_bert.py | {
"start": 12879,
"end": 15170
} | class ____(nn.Module):
def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = DummyBertCrossAttention if is_cross_attention else DummyBertSelfAttention
self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx)
self.output = DummyBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
past_key_value=past_key_value,
cache_position=cache_position,
**kwargs,
)
attention_output = self.output(attention_output, hidden_states)
return attention_output, attn_weights
| DummyBertAttention |
python | getsentry__sentry | src/sentry/testutils/region.py | {
"start": 316,
"end": 414
} | class ____:
regions: frozenset[Region]
default_region: Region
| _TemporaryRegionDirectoryState |
python | getsentry__sentry | src/sentry/apidocs/examples/metric_alert_examples.py | {
"start": 51,
"end": 8962
} | class ____:
LIST_METRIC_ALERT_RULES = [
OpenApiExample(
"List metric alert rules for an organization",
value=[
{
"id": "7",
"name": "Counting Bad Request and Unauthorized Errors in Prod",
"organizationId": "237655244234",
"queryType": 0,
"dataset": "events",
"query": "tags[http.status_code]:[400, 401]",
"aggregate": "count()",
"thresholdType": 0,
"resolveThreshold": None,
"timeWindow": 1440,
"environment": "prod",
"triggers": [
{
"id": "394289",
"alertRuleId": "17723",
"label": "critical",
"thresholdType": 0,
"alertThreshold": 100,
"resolveThreshold": None,
"dateCreated": "2023-09-25T22:15:26.375126Z",
"actions": [
{
"id": "394280",
"alertRuleTriggerId": "92481",
"type": "slack",
"targetType": "specific",
"targetIdentifier": "30489048931789",
"inputChannelId": "#my-channel",
"integrationId": "8753467",
"sentryAppId": None,
"dateCreated": "2023-09-25T22:15:26.375126Z",
}
],
},
],
"projects": ["super-cool-project"],
"owner": "user:53256",
"originalAlertRuleId": None,
"comparisonDelta": None,
"dateModified": "2023-09-25T22:15:26.375126Z",
"dateCreated": "2023-09-25T22:15:26.375126Z",
"createdBy": {"id": 983948, "name": "John Doe", "email": "john.doe@sentry.io"},
}
],
status_codes=["200"],
response_only=True,
)
]
CREATE_METRIC_ALERT_RULE = [
OpenApiExample(
"Create a metric alert rule for an organization",
value={
"id": "177104",
"name": "Apdex % Check",
"organizationId": "4505676595200000",
"queryType": 2,
"dataset": "metrics",
"query": "",
"aggregate": "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
"thresholdType": 0,
"resolveThreshold": 80.0,
"timeWindow": 120,
"environment": None,
"triggers": [
{
"id": "293990",
"alertRuleId": "177104",
"label": "critical",
"thresholdType": 0,
"alertThreshold": 75,
"resolveThreshold": 80.0,
"dateCreated": "2023-09-25T22:01:28.673305Z",
"actions": [
{
"id": "281887",
"alertRuleTriggerId": "293990",
"type": "email",
"targetType": "team",
"targetIdentifier": "2378589792734981",
"inputChannelId": None,
"integrationId": None,
"sentryAppId": None,
"dateCreated": "2023-09-25T22:01:28.680793Z",
}
],
},
{
"id": "492849",
"alertRuleId": "482923",
"label": "warning",
"thresholdType": 1,
"alertThreshold": 50,
"resolveThreshold": 80,
"dateCreated": "2023-09-25T22:01:28.673305Z",
"actions": [],
},
],
"projects": ["our-project"],
"owner": "team:4505676595200000",
"originalAlertRuleId": None,
"comparisonDelta": 10080,
"dateModified": "2023-09-25T22:01:28.637506Z",
"dateCreated": "2023-09-25T22:01:28.637514Z",
"createdBy": {
"id": 2837708,
"name": "Jane Doe",
"email": "jane.doe@sentry.io",
},
},
status_codes=["201"],
response_only=True,
)
]
GET_METRIC_ALERT_RULE = [
OpenApiExample(
"Get detailed view about a metric alert rule",
value={
"id": "177412243058",
"name": "My Metric Alert Rule",
"organizationId": "4505676595200000",
"queryType": 0,
"dataset": "events",
"query": "",
"aggregate": "count_unique(user)",
"thresholdType": 0,
"resolveThreshold": None,
"timeWindow": 60,
"environment": None,
"triggers": [
{
"id": "294385908",
"alertRuleId": "177412243058",
"label": "critical",
"thresholdType": 0,
"alertThreshold": 31.0,
"resolveThreshold": None,
"dateCreated": "2023-09-26T22:14:17.557579Z",
"actions": [],
}
],
"projects": ["my-coolest-project"],
"owner": "team:29508397892374892",
"dateModified": "2023-09-26T22:14:17.522166Z",
"dateCreated": "2023-09-26T22:14:17.522196Z",
"createdBy": {
"id": 2834985497897,
"name": "Somebody That I Used to Know",
"email": "anon@sentry.io",
},
"eventTypes": ["default", "error"],
},
status_codes=["200"],
response_only=True,
)
]
UPDATE_METRIC_ALERT_RULE = [
OpenApiExample(
"Update a metric alert rule",
value={
"id": "345989573",
"name": "P30 Transaction Duration",
"organizationId": "02403489017",
"queryType": 1,
"dataset": "transactions",
"query": "",
"aggregate": "percentile(transaction.duration,0.3)",
"thresholdType": 1,
"resolveThreshold": None,
"timeWindow": 60,
"environment": None,
"triggers": [
{
"id": "0543809890",
"alertRuleId": "345989573",
"label": "critical",
"thresholdType": 1,
"alertThreshold": 70.0,
"resolveThreshold": None,
"dateCreated": "2023-09-25T23:35:31.832084Z",
"actions": [],
}
],
"projects": ["backend"],
"owner": "team:9390258908",
"originalAlertRuleId": None,
"comparisonDelta": 10080.0,
"dateModified": "2023-09-25T23:35:31.787866Z",
"dateCreated": "2023-09-25T23:35:31.787875Z",
"createdBy": {
"id": 902843590658,
"name": "Spongebob Squarepants",
"email": "spongebob.s@example.com",
},
},
status_codes=["200"],
response_only=True,
)
]
GET_METRIC_ALERT_ANOMALIES = [
OpenApiExample(
"Fetch a list of anomalies for a metric alert rule",
value=[
{
"timestamp": 0.1,
"value": 100.0,
"anomaly": {
"anomaly_type": "anomaly_higher_confidence",
"anomaly_value": 100,
},
}
],
)
]
| MetricAlertExamples |
python | huggingface__transformers | src/transformers/models/ministral/modeling_ministral.py | {
"start": 8978,
"end": 9756
} | class ____(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
MinistralRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| MinistralRMSNorm |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 1519,
"end": 1612
} | class ____(models.Model):
name = models.CharField(max_length=42, default='')
| DummyTestModel |
python | encode__httpx | httpx/_exceptions.py | {
"start": 7098,
"end": 7392
} | class ____(StreamError):
"""
Attempted to read or stream response content, but the request has been
closed.
"""
def __init__(self) -> None:
message = "Attempted to read or stream content, but the stream has been closed."
super().__init__(message)
| StreamClosed |
python | pypa__pipenv | pipenv/patched/pip/_internal/network/session.py | {
"start": 7328,
"end": 8740
} | class ____(BaseAdapter):
def send(
self,
request: PreparedRequest,
stream: bool = False,
timeout: Optional[Union[float, Tuple[float, float]]] = None,
verify: Union[bool, str] = True,
cert: Optional[Union[str, Tuple[str, str]]] = None,
proxies: Optional[Mapping[str, str]] = None,
) -> Response:
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
# format the exception raised as a io.BytesIO object,
# to return a better error message:
resp.status_code = 404
resp.reason = type(exc).__name__
resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode())
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict(
{
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
}
)
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self) -> None:
pass
| LocalFSAdapter |
python | pdm-project__pdm | src/pdm/termui.py | {
"start": 4920,
"end": 11034
} | class ____:
"""Terminal UI object"""
MAX_LOG_SIZE = 100 * 1024 * 1024 # 100MB
def __init__(
self, verbosity: Verbosity = Verbosity.NORMAL, *, exit_stack: contextlib.ExitStack | None = None
) -> None:
self.verbosity = verbosity
self.exit_stack = exit_stack or contextlib.ExitStack()
self.log_dir: str | None = None
def set_verbosity(self, verbosity: int) -> None:
self.verbosity = Verbosity(verbosity)
if self.verbosity == Verbosity.QUIET:
self.exit_stack.enter_context(warnings.catch_warnings())
warnings.simplefilter("ignore", PDMWarning, append=True)
warnings.simplefilter("ignore", FutureWarning, append=True)
def set_theme(self, theme: Theme) -> None:
"""set theme for rich console
:param theme: dict of theme
"""
rich.get_console().push_theme(theme)
_err_console.push_theme(theme)
def echo(
self,
message: str | RichProtocol = "",
err: bool = False,
verbosity: Verbosity = Verbosity.QUIET,
**kwargs: Any,
) -> None:
"""print message using rich console
:param message: message with rich markup, defaults to "".
:param err: if true print to stderr, defaults to False.
:param verbosity: verbosity level, defaults to QUIET.
"""
if self.verbosity >= verbosity:
console = _err_console if err else rich.get_console()
if not console.is_interactive:
kwargs.setdefault("crop", False)
kwargs.setdefault("overflow", "ignore")
console.print(message, **kwargs)
def display_columns(self, rows: Sequence[Sequence[str]], header: list[str] | None = None) -> None:
"""Print rows in aligned columns.
:param rows: a rows of data to be displayed.
:param header: a list of header strings.
"""
if header:
table = Table(box=ROUNDED)
for title in header:
if title[0] == "^":
title, justify = title[1:], "center"
elif title[0] == ">":
title, justify = title[1:], "right"
else:
title, justify = title, "left"
table.add_column(title, justify=justify)
else:
table = Table.grid(padding=(0, 1))
for _ in rows[0]:
table.add_column()
for row in rows:
table.add_row(*row)
rich.print(table)
@contextlib.contextmanager
def logging(self, type_: str = "install") -> Iterator[logging.Logger]:
"""A context manager that opens a file for logging when verbosity is NORMAL or
print to the stdout otherwise.
"""
log_file: str | None = None
if self.verbosity >= Verbosity.DETAIL:
handler: logging.Handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS[self.verbosity])
else:
if self.log_dir and not os.path.exists(self.log_dir):
os.makedirs(self.log_dir, exist_ok=True)
self._clean_logs()
fp, log_file = tempfile.mkstemp(".log", f"pdm-{type_}-", self.log_dir)
handler = logging.StreamHandler(
TruncatedIO(self.exit_stack.enter_context(open(fp, "a", encoding="utf-8")), self.MAX_LOG_SIZE)
)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
logger.addHandler(handler)
unearth_logger.addHandler(handler)
def cleanup() -> None:
if not log_file:
return
with contextlib.suppress(OSError):
os.unlink(log_file)
try:
yield logger
except Exception:
if self.verbosity < Verbosity.DETAIL:
logger.exception("Error occurs")
self.echo(
f"See [warning]{log_file}[/] for detailed debug log.",
style="error",
err=True,
)
raise
else:
self.exit_stack.callback(cleanup)
finally:
logger.removeHandler(handler)
unearth_logger.removeHandler(handler)
handler.close()
def open_spinner(self, title: str) -> Spinner:
"""Open a spinner as a context manager."""
if self.verbosity >= Verbosity.DETAIL or not is_interactive():
return DummySpinner(title)
else:
return _err_console.status(title, spinner=SPINNER, spinner_style="primary")
def make_progress(self, *columns: str | ProgressColumn, **kwargs: Any) -> Progress:
"""create a progress instance for indented spinners"""
return Progress(*columns, disable=self.verbosity >= Verbosity.DETAIL, **kwargs)
def info(self, message: str, verbosity: Verbosity = Verbosity.NORMAL) -> None:
"""Print a message to stdout."""
self.echo(f"[info]INFO:[/] [dim]{message}[/]", err=True, verbosity=verbosity)
def deprecated(self, message: str, verbosity: Verbosity = Verbosity.NORMAL) -> None:
"""Print a message to stdout."""
self.echo(f"[warning]DEPRECATED:[/] [dim]{message}[/]", err=True, verbosity=verbosity)
def warn(self, message: str, verbosity: Verbosity = Verbosity.NORMAL) -> None:
"""Print a message to stdout."""
self.echo(f"[warning]WARNING:[/] {message}", err=True, verbosity=verbosity)
def error(self, message: str, verbosity: Verbosity = Verbosity.QUIET) -> None:
"""Print a message to stdout."""
self.echo(f"[error]ERROR:[/] {message}", err=True, verbosity=verbosity)
def _clean_logs(self) -> None:
import time
from pathlib import Path
if self.log_dir is None:
return
for file in Path(self.log_dir).iterdir():
if not file.is_file():
continue
if file.stat().st_ctime < time.time() - 7 * 24 * 60 * 60: # 7 days
file.unlink()
| UI |
python | run-llama__llama_index | llama-index-core/llama_index/core/data_structs/table.py | {
"start": 448,
"end": 523
} | class ____(IndexStruct):
"""Struct outputs."""
@dataclass
| BaseStructTable |
python | sphinx-doc__sphinx | sphinx/util/matching.py | {
"start": 1901,
"end": 5557
} | class ____:
"""A pattern matcher for Multiple shell-style glob patterns.
Note: this modifies the patterns to work with copy_asset().
For example, "**/index.rst" matches with "index.rst"
"""
def __init__(self, exclude_patterns: Iterable[str]) -> None:
expanded = [pat[3:] for pat in exclude_patterns if pat.startswith('**/')]
self.patterns = compile_matchers(list(exclude_patterns) + expanded)
def __call__(self, string: str) -> bool:
return self.match(string)
def match(self, string: str) -> bool:
string = canon_path(string)
return any(pat(string) for pat in self.patterns)
DOTFILES = Matcher(['**/.*'])
_pat_cache: dict[str, re.Pattern[str]] = {}
def patmatch(name: str, pat: str) -> re.Match[str] | None:
"""Return if name matches the regular expression (pattern)
``pat```. Adapted from fnmatch module.
"""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
return _pat_cache[pat].match(name)
def patfilter(names: Iterable[str], pat: str) -> list[str]:
"""Return the subset of the list ``names`` that match
the regular expression (pattern) ``pat``.
Adapted from fnmatch module.
"""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
match = _pat_cache[pat].match
return list(filter(match, names))
def get_matching_files(
dirname: str | os.PathLike[str],
include_patterns: Iterable[str] = ('**',),
exclude_patterns: Iterable[str] = (),
) -> Iterator[str]:
"""Get all file names in a directory, recursively.
Filter file names by the glob-style include_patterns and exclude_patterns.
The default values include all files ("**") and exclude nothing ("").
Only files matching some pattern in *include_patterns* are included, and
exclusions from *exclude_patterns* take priority over inclusions.
"""
# dirname is a normalized absolute path.
dirname = Path(dirname).resolve()
exclude_matchers = compile_matchers(exclude_patterns)
include_matchers = compile_matchers(include_patterns)
for root, dirs, files in os.walk(dirname, followlinks=True):
relative_root = os.path.relpath(root, dirname)
if relative_root == '.':
relative_root = '' # suppress dirname for files on the target dir
relative_root_path = Path(relative_root)
# Filter files
included_files = []
for entry in sorted(files):
entry = _unicode_nfc((relative_root_path / entry).as_posix())
keep = False
for matcher in include_matchers:
if matcher(entry):
keep = True
break # break the inner loop
for matcher in exclude_matchers:
if matcher(entry):
keep = False
break # break the inner loop
if keep:
included_files.append(entry)
# Filter directories
filtered_dirs = []
for dir_name in sorted(dirs):
normalised = _unicode_nfc((relative_root_path / dir_name).as_posix())
for matcher in exclude_matchers:
if matcher(normalised):
break # break the inner loop
else:
# if the loop didn't break
filtered_dirs.append(dir_name)
dirs[:] = filtered_dirs
# Yield filtered files
yield from included_files
def _unicode_nfc(s: str, /) -> str:
"""Normalise the string to NFC form."""
return unicodedata.normalize('NFC', s)
| Matcher |
python | pypa__setuptools | setuptools/tests/test_manifest.py | {
"start": 9266,
"end": 18562
} | class ____(TempDirTestCase):
"""
A copy of the relevant bits of distutils/tests/test_filelist.py,
to ensure setuptools' version of FileList keeps parity with distutils.
"""
@pytest.fixture(autouse=os.getenv("SETUPTOOLS_USE_DISTUTILS") == "stdlib")
def _compat_record_logs(self, monkeypatch, caplog):
"""Account for stdlib compatibility"""
def _log(_logger, level, msg, args):
exc = sys.exc_info()
rec = logging.LogRecord("distutils", level, "", 0, msg, args, exc)
caplog.records.append(rec)
monkeypatch.setattr(log.Log, "_log", _log)
def get_records(self, caplog, *levels):
return [r for r in caplog.records if r.levelno in levels]
def assertNoWarnings(self, caplog):
assert self.get_records(caplog, log.WARN) == []
caplog.clear()
def assertWarnings(self, caplog):
if IS_PYPY and not caplog.records:
pytest.xfail("caplog checks may not work well in PyPy")
else:
assert len(self.get_records(caplog, log.WARN)) > 0
caplog.clear()
def make_files(self, files):
for file in files:
file = os.path.join(self.temp_dir, file)
dirname, _basename = os.path.split(file)
os.makedirs(dirname, exist_ok=True)
touch(file)
def test_process_template_line(self):
# testing all MANIFEST.in template patterns
file_list = FileList()
ml = make_local_path
# simulated file list
self.make_files([
'foo.tmp',
'ok',
'xo',
'four.txt',
'buildout.cfg',
# filelist does not filter out VCS directories,
# it's sdist that does
ml('.hg/last-message.txt'),
ml('global/one.txt'),
ml('global/two.txt'),
ml('global/files.x'),
ml('global/here.tmp'),
ml('f/o/f.oo'),
ml('dir/graft-one'),
ml('dir/dir2/graft2'),
ml('dir3/ok'),
ml('dir3/sub/ok.txt'),
])
MANIFEST_IN = DALS(
"""\
include ok
include xo
exclude xo
include foo.tmp
include buildout.cfg
global-include *.x
global-include *.txt
global-exclude *.tmp
recursive-include f *.oo
recursive-exclude global *.x
graft dir
prune dir3
"""
)
for line in MANIFEST_IN.split('\n'):
if not line:
continue
file_list.process_template_line(line)
wanted = [
'buildout.cfg',
'four.txt',
'ok',
ml('.hg/last-message.txt'),
ml('dir/graft-one'),
ml('dir/dir2/graft2'),
ml('f/o/f.oo'),
ml('global/one.txt'),
ml('global/two.txt'),
]
file_list.sort()
assert file_list.files == wanted
def test_exclude_pattern(self):
# return False if no match
file_list = FileList()
assert not file_list.exclude_pattern('*.py')
# return True if files match
file_list = FileList()
file_list.files = ['a.py', 'b.py']
assert file_list.exclude_pattern('*.py')
# test excludes
file_list = FileList()
file_list.files = ['a.py', 'a.txt']
file_list.exclude_pattern('*.py')
file_list.sort()
assert file_list.files == ['a.txt']
def test_include_pattern(self):
# return False if no match
file_list = FileList()
self.make_files([])
assert not file_list.include_pattern('*.py')
# return True if files match
file_list = FileList()
self.make_files(['a.py', 'b.txt'])
assert file_list.include_pattern('*.py')
# test * matches all files
file_list = FileList()
self.make_files(['a.py', 'b.txt'])
file_list.include_pattern('*')
file_list.sort()
assert file_list.files == ['a.py', 'b.txt']
def test_process_template_line_invalid(self):
# invalid lines
file_list = FileList()
for action in (
'include',
'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft',
'prune',
'blarg',
):
with pytest.raises(DistutilsTemplateError):
file_list.process_template_line(action)
def test_include(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# include
file_list = FileList()
self.make_files(['a.py', 'b.txt', ml('d/c.py')])
file_list.process_template_line('include *.py')
file_list.sort()
assert file_list.files == ['a.py']
self.assertNoWarnings(caplog)
file_list.process_template_line('include *.rb')
file_list.sort()
assert file_list.files == ['a.py']
self.assertWarnings(caplog)
def test_exclude(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# exclude
file_list = FileList()
file_list.files = ['a.py', 'b.txt', ml('d/c.py')]
file_list.process_template_line('exclude *.py')
file_list.sort()
assert file_list.files == ['b.txt', ml('d/c.py')]
self.assertNoWarnings(caplog)
file_list.process_template_line('exclude *.rb')
file_list.sort()
assert file_list.files == ['b.txt', ml('d/c.py')]
self.assertWarnings(caplog)
def test_global_include(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# global-include
file_list = FileList()
self.make_files(['a.py', 'b.txt', ml('d/c.py')])
file_list.process_template_line('global-include *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.py')]
self.assertNoWarnings(caplog)
file_list.process_template_line('global-include *.rb')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.py')]
self.assertWarnings(caplog)
def test_global_exclude(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# global-exclude
file_list = FileList()
file_list.files = ['a.py', 'b.txt', ml('d/c.py')]
file_list.process_template_line('global-exclude *.py')
file_list.sort()
assert file_list.files == ['b.txt']
self.assertNoWarnings(caplog)
file_list.process_template_line('global-exclude *.rb')
file_list.sort()
assert file_list.files == ['b.txt']
self.assertWarnings(caplog)
def test_recursive_include(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# recursive-include
file_list = FileList()
self.make_files(['a.py', ml('d/b.py'), ml('d/c.txt'), ml('d/d/e.py')])
file_list.process_template_line('recursive-include d *.py')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertNoWarnings(caplog)
file_list.process_template_line('recursive-include e *.py')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertWarnings(caplog)
def test_recursive_exclude(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# recursive-exclude
file_list = FileList()
file_list.files = ['a.py', ml('d/b.py'), ml('d/c.txt'), ml('d/d/e.py')]
file_list.process_template_line('recursive-exclude d *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.txt')]
self.assertNoWarnings(caplog)
file_list.process_template_line('recursive-exclude e *.py')
file_list.sort()
assert file_list.files == ['a.py', ml('d/c.txt')]
self.assertWarnings(caplog)
def test_graft(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# graft
file_list = FileList()
self.make_files(['a.py', ml('d/b.py'), ml('d/d/e.py'), ml('f/f.py')])
file_list.process_template_line('graft d')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertNoWarnings(caplog)
file_list.process_template_line('graft e')
file_list.sort()
assert file_list.files == [ml('d/b.py'), ml('d/d/e.py')]
self.assertWarnings(caplog)
def test_prune(self, caplog):
caplog.set_level(logging.DEBUG)
ml = make_local_path
# prune
file_list = FileList()
file_list.files = ['a.py', ml('d/b.py'), ml('d/d/e.py'), ml('f/f.py')]
file_list.process_template_line('prune d')
file_list.sort()
assert file_list.files == ['a.py', ml('f/f.py')]
self.assertNoWarnings(caplog)
file_list.process_template_line('prune e')
file_list.sort()
assert file_list.files == ['a.py', ml('f/f.py')]
self.assertWarnings(caplog)
| TestFileListTest |
python | doocs__leetcode | solution/3000-3099/3046.Split the Array/Solution.py | {
"start": 0,
"end": 121
} | class ____:
def isPossibleToSplit(self, nums: List[int]) -> bool:
return max(Counter(nums).values()) < 3
| Solution |
python | cython__cython | Cython/Compiler/AutoDocTransforms.py | {
"start": 214,
"end": 1414
} | class ____(ExpressionWriter):
"""
A Cython code writer for Python expressions in argument/variable annotations.
"""
def __init__(self, description=None):
"""description is optional. If specified it is used in
warning messages for the nodes that don't convert to string properly.
If not specified then no messages are generated.
"""
ExpressionWriter.__init__(self)
self.description = description
self.incomplete = False
def visit_Node(self, node):
self.put("<???>")
self.incomplete = True
if self.description:
warning(node.pos,
"Failed to convert code to string representation in {}".format(
self.description), level=1)
def visit_LambdaNode(self, node):
# XXX Should we do better?
self.put("<lambda>")
self.incomplete = True
if self.description:
warning(node.pos,
"Failed to convert lambda to string representation in {}".format(
self.description), level=1)
def visit_AnnotationNode(self, node):
self.put(node.string.value)
| AnnotationWriter |
python | jazzband__prettytable | tests/test_sections.py | {
"start": 1674,
"end": 2302
} | class ____:
def test_clear_rows(self, helper_table: PrettyTable) -> None:
helper_table.add_row([0, "a", "b", "c"], divider=True)
helper_table.clear_rows()
assert helper_table.rows == []
assert helper_table.dividers == []
assert helper_table.field_names == ["", "Field 1", "Field 2", "Field 3"]
def test_clear(self, helper_table: PrettyTable) -> None:
helper_table.add_row([0, "a", "b", "c"], divider=True)
helper_table.clear()
assert helper_table.rows == []
assert helper_table.dividers == []
assert helper_table.field_names == []
| TestClearing |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 295,
"end": 538
} | class ____(unittest.TestCase):
"""
Shared code for a sorting unit test.
"""
def setUp(self):
self.input = list(range(10))
random.shuffle(self.input)
self.correct = list(range(10))
| SortingAlgorithmTestCase |
python | huggingface__transformers | src/transformers/models/llava/modeling_llava.py | {
"start": 2335,
"end": 3789
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
| LlavaCausalLMOutputWithPast |
python | facebook__pyre-check | tools/upgrade/commands/tests/global_version_update_test.py | {
"start": 458,
"end": 4580
} | class ____(unittest.TestCase):
@patch("subprocess.run")
@patch.object(Repository, "commit_changes")
@patch.object(
Configuration, "find_project_configuration", return_value=Path("/root")
)
@patch.object(Configuration, "set_version")
@patch.object(Configuration, "write")
@patch.object(
Configuration,
"gather_local_configurations",
return_value=[
Configuration(
Path("/root/a/.pyre_configuration.local"), {"use_buck_builder": False}
),
Configuration(
Path("/root/b/.pyre_configuration.local"), {"use_buck_builder": True}
),
],
)
@patch.object(upgrade.Fixme, "run")
@patch("builtins.open")
def test_run_global_version_update(
self,
open_mock,
run_fixme,
gather_local_configurations,
configuration_write,
configuration_set_version,
find_project_configuration,
commit_changes,
subprocess,
) -> None:
arguments = MagicMock()
arguments.hash = "abcd"
arguments.paths = []
arguments.no_commit = False
with patch("json.dump"):
mocks = [
mock_open(read_data='{"version": "old"}').return_value,
mock_open(read_data='{"use_buck_builder": false}').return_value,
mock_open(read_data='{"use_buck_builder": true}').return_value,
]
open_mock.side_effect = mocks
GlobalVersionUpdate.from_arguments(arguments, repository).run()
configuration_set_version.assert_has_calls(
[call("abcd"), call("old"), call("old")]
)
configuration_write.assert_has_calls([call(), call(), call()])
commit_changes.assert_called_once_with(
commit=True,
title="Update pyre global configuration version",
summary="Automatic upgrade to hash `abcd`",
ignore_failures=True,
)
# Paths passed from arguments will override the local configuration list
# Therefore, we only read the first json configuration.
subprocess.reset_mock()
configuration_set_version.reset_mock()
configuration_write.reset_mock()
arguments.paths = [Path("foo/bar")]
with patch("json.dump"):
mocks = [
mock_open(read_data='{"version": "old"}').return_value,
mock_open(read_data="{}").return_value,
]
open_mock.side_effect = mocks
GlobalVersionUpdate.from_arguments(arguments, repository).run()
configuration_set_version.assert_has_calls([call("abcd"), call("old")])
configuration_write.assert_has_calls([call(), call()])
subprocess.assert_has_calls([])
# Run fixme if global version has sources.
subprocess.reset_mock()
configuration_set_version.reset_mock()
configuration_write.reset_mock()
commit_changes.reset_mock()
arguments.paths = []
with patch("json.dump"):
mocks = [
mock_open(
read_data='{"version": "old", "source_directories": ["source"]}'
).return_value,
mock_open(read_data='{"use_buck_builder": false}').return_value,
mock_open(read_data='{"use_buck_builder": true}').return_value,
]
open_mock.side_effect = mocks
GlobalVersionUpdate.from_arguments(arguments, repository).run()
configuration_set_version.assert_has_calls(
[call("abcd"), call("old"), call("old")]
)
configuration_write.assert_has_calls([call(), call(), call()])
run_fixme.assert_called_once()
commit_changes.assert_called_once_with(
commit=True,
title="Update pyre global configuration version",
summary="Automatic upgrade to hash `abcd`",
ignore_failures=True,
)
| UpdateGlobalVersionTest |
python | tensorflow__tensorflow | tensorflow/python/types/internal.py | {
"start": 1170,
"end": 1325
} | class ____(object):
"""Interface for internal isinstance checks to framework/tensor_spec.py.
This helps to avoid circular dependencies.
"""
| TensorSpec |
python | pytorch__pytorch | torch/autograd/grad_mode.py | {
"start": 2558,
"end": 4172
} | class ____(_NoParamDecoratorContextManager):
r"""Context-manager that enables gradient calculation.
Enables gradient calculation, if it has been disabled via :class:`~no_grad`
or :class:`~set_grad_enabled`.
This context manager is thread local; it will not affect computation
in other threads.
Also functions as a decorator.
.. note::
enable_grad is one of several mechanisms that can enable or
disable gradients locally see :ref:`locally-disable-grad-doc` for
more information on how they compare.
.. note::
This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
Example::
>>> # xdoctest: +SKIP
>>> x = torch.tensor([1.], requires_grad=True)
>>> with torch.no_grad():
... with torch.enable_grad():
... y = x * 2
>>> y.requires_grad
True
>>> y.backward()
>>> x.grad
tensor([2.])
>>> @torch.enable_grad()
... def doubler(x):
... return x * 2
>>> with torch.no_grad():
... z = doubler(x)
>>> z.requires_grad
True
>>> @torch.enable_grad()
... def tripler(x):
... return x * 3
>>> with torch.no_grad():
... z = tripler(x)
>>> z.requires_grad
True
"""
def __enter__(self) -> None:
self.prev = torch.is_grad_enabled()
torch._C._set_grad_enabled(True)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
torch._C._set_grad_enabled(self.prev)
| enable_grad |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 7418,
"end": 7738
} | class ____:
target_E = -2.06261
solution = [1.34941, -1.34941]
xmin = np.array([-10., -10])
xmax = np.array([10., 10])
def fun(self, x):
arg = abs(100 - sqrt(x[0]**2 + x[1]**2)/pi)
val = np.power(abs(sin(x[0]) * sin(x[1]) * exp(arg)) + 1., 0.1)
return -0.0001 * val
| CrossInTray |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 123425,
"end": 123742
} | class ____:
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
assert_almost_equal(np.var(A), real_var)
assert_almost_equal(np.std(A)**2, real_var)
def test_scalars(self):
assert_equal(np.var(1j), 0)
assert_equal(np.std(1j), 0)
| TestStdVarComplex |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 1502,
"end": 1631
} | class ____:
foo: int = 42
baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
| WithDefaultExample |
python | gabrielfalcao__HTTPretty | httpretty/core.py | {
"start": 30862,
"end": 37526
} | class ____(BaseClass):
"""Created by :py:meth:`~httpretty.core.httpretty.register_uri` and
stored in memory as internal representation of a HTTP
request/response definition.
Args:
method (str): One of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``.
uri (str|re.Pattern): The URL to match
adding_headers (dict): Extra headers to be added to the response
forcing_headers (dict): Overwrite response headers.
status (int): The status code for the response, defaults to ``200``.
streaming (bool): Whether should stream the response into chunks via generator.
headers: Headers to inject in the faked response.
Returns:
httpretty.Entry: containing the request-matching metadata.
.. warning:: When using the ``forcing_headers`` option make sure to add the header ``Content-Length`` to match at most the total body length, otherwise some HTTP clients can hang indefinitely.
"""
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, str):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
"""validates the body size with the value of the ``Content-Length``
header
"""
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
igot = None
try:
igot = int(got)
except (ValueError, TypeError):
warnings.warn(
'HTTPretty got to register the Content-Length header '
'with "%r" which is not a number' % got)
return
if igot and igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header '
'Content-Length you registered expects size "%d" but '
'the body you registered for that has actually length '
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry {} {} getting {}>'.format(
self.method,
self.uri,
self.status
)
def normalize_headers(self, headers):
"""Normalize keys in header names so that ``COntent-tyPe`` becomes ``content-type``
:param headers: dict
:returns: dict
"""
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
"""writes HTTP Response data to a file descriptor
:parm fk: a file-like object
.. warning:: **side-effect:** this method moves the cursor of the given file object to zero
"""
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(
self.normalize_headers(
self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
headers = self.normalize_headers(headers)
# TODO: document this behavior:
if 'content-length' not in headers:
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length',
self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
server = headers.pop('server', None)
if server:
string_list.append('server: %s' % server)
for k, v in headers.items():
string_list.append(
'{}: {}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
def url_fix(s, charset=None):
"""escapes special characters
"""
if charset:
warnings.warn("{}.url_fix() charset argument is deprecated".format(__name__), DeprecationWarning)
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
| Entry |
python | pytest-dev__pytest | testing/code/test_source.py | {
"start": 1643,
"end": 2417
} | class ____:
def setup_class(self) -> None:
self.source = Source(
"""\
def f(x):
pass
def g(x):
pass
"""
)
def test_getrange(self) -> None:
x = self.source[0:2]
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getrange_step_not_supported(self) -> None:
with pytest.raises(IndexError, match=r"step"):
self.source[::2]
def test_getline(self) -> None:
x = self.source[0]
assert x == "def f(x):"
def test_len(self) -> None:
assert len(self.source) == 4
def test_iter(self) -> None:
values = [x for x in self.source]
assert len(values) == 4
| TestAccesses |
python | dask__distributed | distributed/diagnostics/memory_sampler.py | {
"start": 483,
"end": 5883
} | class ____:
"""Sample cluster-wide memory usage every <interval> seconds.
**Usage**
.. code-block:: python
client = Client()
ms = MemorySampler()
with ms.sample("run 1"):
<run first workflow>
with ms.sample("run 2"):
<run second workflow>
...
ms.plot()
or with an asynchronous client:
.. code-block:: python
client = await Client(asynchronous=True)
ms = MemorySampler()
async with ms.sample("run 1"):
<run first workflow>
async with ms.sample("run 2"):
<run second workflow>
...
ms.plot()
"""
samples: dict[str, list[tuple[float, int]]]
def __init__(self):
self.samples = {}
def sample(
self,
label: str | None = None,
*,
client: Client | None = None,
measure: str = "process",
interval: float = 0.5,
) -> Any:
"""Context manager that records memory usage in the cluster.
This is synchronous if the client is synchronous and
asynchronous if the client is asynchronous.
The samples are recorded in ``self.samples[<label>]``.
Parameters
==========
label: str, optional
Tag to record the samples under in the self.samples dict.
Default: automatically generate a random label
client: Client, optional
client used to connect to the scheduler.
Default: use the global client
measure: str, optional
One of the measures from :class:`distributed.scheduler.MemoryState`.
Default: sample process memory
interval: float, optional
sampling interval, in seconds.
Default: 0.5
"""
if not client:
from distributed.client import get_client
client = get_client()
if client.asynchronous:
return self._sample_async(label, client, measure, interval)
else:
return self._sample_sync(label, client, measure, interval)
@contextmanager
def _sample_sync(
self, label: str | None, client: Client, measure: str, interval: float
) -> Iterator[None]:
key = client.sync(
client.scheduler.memory_sampler_start,
client=client.id,
measure=measure,
interval=interval,
)
try:
yield
finally:
samples = client.sync(client.scheduler.memory_sampler_stop, key=key)
self.samples[label or key] = samples
@asynccontextmanager
async def _sample_async(
self, label: str | None, client: Client, measure: str, interval: float
) -> AsyncIterator[None]:
key = await client.scheduler.memory_sampler_start(
client=client.id, measure=measure, interval=interval
)
try:
yield
finally:
samples = await client.scheduler.memory_sampler_stop(key=key)
self.samples[label or key] = samples
def to_pandas(self, *, align: bool = False) -> pd.DataFrame:
"""Return the data series as a pandas.Dataframe.
Parameters
==========
align : bool, optional
If True, change the absolute timestamps into time deltas from the first
sample of each series, so that different series can be visualized side by
side. If False (the default), use absolute timestamps.
"""
import pandas as pd
ss = {}
for label, s_list in self.samples.items():
assert s_list # There's always at least one sample
s = pd.DataFrame(s_list).set_index(0)[1]
s.index = pd.to_datetime(s.index, unit="s")
s.name = label
if align:
# convert datetime to timedelta from the first sample
s.index -= s.index[0]
ss[label] = s[~s.index.duplicated()] # type: ignore[attr-defined]
df = pd.DataFrame(ss)
if len(ss) > 1:
# Forward-fill NaNs in the middle of a series created either by overlapping
# sampling time range or by align=True. Do not ffill series beyond their
# last sample.
df = df.ffill().where(~pd.isna(df.bfill()))
return df
def plot(self, *, align: bool = False, **kwargs: Any) -> Any:
"""Plot data series collected so far
Parameters
==========
align : bool (optional)
See :meth:`~distributed.diagnostics.MemorySampler.to_pandas`
kwargs
Passed verbatim to :meth:`pandas.DataFrame.plot`
Returns
=======
Output of :meth:`pandas.DataFrame.plot`
"""
df = self.to_pandas(align=align)
resampled = df.resample("1s").nearest() / 2**30
# If resampling collapses data onto one point, we'll run into
# https://stackoverflow.com/questions/58322744/matplotlib-userwarning-attempting-to-set-identical-left-right-737342-0
# This should only happen in tests since users typically sample for more
# than a second
if len(resampled) == 1:
resampled = df.resample("1ms").nearest() / 2**30
return resampled.plot(
xlabel="time",
ylabel="Cluster memory (GiB)",
**kwargs,
)
| MemorySampler |
python | django__django | tests/messages_tests/test_api.py | {
"start": 126,
"end": 1483
} | class ____(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.request = self.rf.request()
self.storage = DummyStorage()
def test_ok(self):
msg = "some message"
self.request._messages = self.storage
messages.add_message(self.request, messages.DEBUG, msg)
[message] = self.storage.store
self.assertEqual(msg, message.message)
def test_request_is_none(self):
msg = "add_message() argument must be an HttpRequest object, not 'NoneType'."
self.request._messages = self.storage
with self.assertRaisesMessage(TypeError, msg):
messages.add_message(None, messages.DEBUG, "some message")
self.assertEqual(self.storage.store, [])
def test_middleware_missing(self):
msg = (
"You cannot add messages without installing "
"django.contrib.messages.middleware.MessageMiddleware"
)
with self.assertRaisesMessage(messages.MessageFailure, msg):
messages.add_message(self.request, messages.DEBUG, "some message")
self.assertEqual(self.storage.store, [])
def test_middleware_missing_silently(self):
messages.add_message(
self.request, messages.DEBUG, "some message", fail_silently=True
)
self.assertEqual(self.storage.store, [])
| ApiTests |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py | {
"start": 2427,
"end": 2974
} | class ____(Wav2Vec2ConformerRotaryPositionalEmbedding):
def __init__(self, config):
nn.Module.__init__(self)
dim = config.hidden_size // config.num_attention_heads
base = config.rotary_embedding_base
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
# Ignore copy
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.cached_sequence_length = None
self.cached_rotary_positional_embedding = None
| Wav2Vec2BertRotaryPositionalEmbedding |
python | dask__distributed | distributed/tests/test_active_memory_manager.py | {
"start": 44434,
"end": 49347
} | class ____(ActiveMemoryManagerPolicy):
"""Inanely suggest to drop every single key in the cluster"""
def __init__(self):
self.i = 0
def run(self):
for ts in self.manager.scheduler.tasks.values():
# Instead of yielding ("drop", ts, None) for each worker, which would result
# in semi-predictable output about which replica survives, randomly choose a
# different survivor at each AMM run.
candidates = list(ts.who_has or ())
random.shuffle(candidates)
for ws in candidates:
yield "drop", ts, {ws}
# Stop running after ~2s
self.i += 1
if self.i == 20:
self.manager.policies.remove(self)
async def tensordot_stress(c, s):
pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
rng = da.random.RandomState(0)
a = rng.random((10, 10), chunks=(1, 1))
# dask.array.core.PerformanceWarning: Increasing number of chunks by factor of 10
with warnings.catch_warnings():
warnings.simplefilter("ignore")
b = (a @ a.T).sum().round(3)
assert await c.compute(b) == 245.394
expected_tasks = -1
for _, msg in await c.get_events("scheduler"):
if msg["action"] == "update-graph":
assert msg["status"] == "OK", msg
expected_tasks = msg["metrics"]["tasks"]
break
else:
raise RuntimeError("Expected 'update_graph' event not found")
# Test that we didn't recompute any tasks during the stress test
await async_poll_for(lambda: not s.tasks, timeout=5)
assert sum(t.start == "memory" for t in s.transition_log) == expected_tasks
@pytest.mark.slow
@gen_cluster(
client=True,
nthreads=[("", 1)] * 4,
config=NO_AMM,
)
async def test_noamm_stress(c, s, *workers):
"""Test the tensordot_stress helper without AMM. This is to figure out if a
stability issue is AMM-specific or not.
"""
await tensordot_stress(c, s)
@pytest.mark.slow
@gen_cluster(
client=True,
nthreads=[("", 1)] * 4,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.1,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.tests.test_active_memory_manager.DropEverything"},
],
},
)
async def test_drop_stress(c, s, *workers):
"""A policy which suggests dropping everything won't break a running computation,
but only slow it down.
See also: test_ReduceReplicas_stress
"""
await tensordot_stress(c, s)
@pytest.mark.slow
@gen_cluster(
client=True,
nthreads=[("", 1)] * 4,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.1,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.active_memory_manager.ReduceReplicas"},
],
},
)
async def test_ReduceReplicas_stress(c, s, *workers):
"""Running ReduceReplicas compulsively won't break a running computation. Unlike
test_drop_stress above, this test does not stop running after a few seconds - the
policy must not disrupt the computation too much.
"""
await tensordot_stress(c, s)
@pytest.mark.slow
@pytest.mark.parametrize("use_ReduceReplicas", [False, True])
@gen_cluster(
client=True,
nthreads=[("", 1)] * 10,
config={
"distributed.scheduler.active-memory-manager.start": True,
"distributed.scheduler.active-memory-manager.interval": 0.1,
"distributed.scheduler.active-memory-manager.measure": "managed",
"distributed.scheduler.active-memory-manager.policies": [
{"class": "distributed.active_memory_manager.ReduceReplicas"},
],
},
scheduler_kwargs={"transition_counter_max": 500_000},
worker_kwargs={"transition_counter_max": 500_000},
)
async def test_RetireWorker_stress(c, s, *workers, use_ReduceReplicas):
"""It is safe to retire the best part of a cluster in the middle of a computation"""
if not use_ReduceReplicas:
s.extensions["amm"].policies.clear()
addrs = list(s.workers)
random.shuffle(addrs)
print(f"Removing all workers except {addrs[9]}")
tasks = [asyncio.create_task(tensordot_stress(c, s))]
await asyncio.sleep(1)
tasks.append(asyncio.create_task(c.retire_workers(addrs[0:2])))
await asyncio.sleep(1)
tasks.append(asyncio.create_task(c.retire_workers(addrs[2:5])))
await asyncio.sleep(1)
tasks.append(asyncio.create_task(c.retire_workers(addrs[5:9])))
await asyncio.gather(*tasks)
assert set(s.workers) == {addrs[9]}
| DropEverything |
python | numpy__numpy | numpy/lib/tests/test_type_check.py | {
"start": 10277,
"end": 14458
} | class ____:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1)) / 0.)
assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
# perform the same tests but with nan, posinf and neginf keywords
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1)) / 0.,
nan=10, posinf=20, neginf=30)
assert_equal(vals, [30, 10, 20])
assert_all(np.isfinite(vals[[0, 2]]))
assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
vals = np.array((-1., 0, 1)) / 0.
result = nan_to_num(vals, copy=False)
assert_(result is vals)
assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
vals = np.array((-1., 0, 1)) / 0.
result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
assert_(result is vals)
assert_equal(vals, [30, 10, 20])
assert_all(np.isfinite(vals[[0, 2]]))
assert_equal(type(vals), np.ndarray)
def test_array(self):
vals = nan_to_num([1])
assert_array_equal(vals, np.array([1], int))
assert_equal(type(vals), np.ndarray)
vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
assert_array_equal(vals, np.array([1], int))
assert_equal(type(vals), np.ndarray)
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
assert_equal(type(vals), np.int_)
vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
assert_all(vals == 1)
assert_equal(type(vals), np.int_)
def test_float(self):
vals = nan_to_num(1.0)
assert_all(vals == 1.0)
assert_equal(type(vals), np.float64)
vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
assert_all(vals == 1.1)
assert_equal(type(vals), np.float64)
def test_complex_good(self):
vals = nan_to_num(1 + 1j)
assert_all(vals == 1 + 1j)
assert_equal(type(vals), np.complex128)
vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30)
assert_all(vals == 1 + 1j)
assert_equal(type(vals), np.complex128)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(0 + 1.j) / 0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
assert_equal(type(vals), np.complex128)
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(-1 + 1.j) / 0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
assert_equal(type(vals), np.complex128)
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
def test_do_not_rewrite_previous_keyword(self):
# This is done to test that when, for instance, nan=np.inf then these
# values are not rewritten by posinf keyword to the posinf value.
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999)
assert_all(np.isfinite(vals[[0, 2]]))
assert_all(vals[0] < -1e10)
assert_equal(vals[[1, 2]], [np.inf, 999])
assert_equal(type(vals), np.ndarray)
| TestNanToNum |
python | pypa__warehouse | tests/unit/cli/test_shell.py | {
"start": 140,
"end": 976
} | class ____:
def test_bpython(self, monkeypatch):
monkeypatch.setitem(sys.modules, "bpython", pretend.stub())
assert shell.autodetect() == "bpython"
def test_bpython_over_ipython(self, monkeypatch):
monkeypatch.setitem(sys.modules, "bpython", pretend.stub())
monkeypatch.setitem(sys.modules, "IPython", pretend.stub())
assert shell.autodetect() == "bpython"
def test_ipython(self, monkeypatch):
monkeypatch.setitem(sys.modules, "IPython", pretend.stub())
assert shell.autodetect() == "ipython"
def test_plain(self, monkeypatch):
"""Neither bpython nor ipython are installed."""
monkeypatch.setitem(sys.modules, "bpython", None)
monkeypatch.setitem(sys.modules, "IPython", None)
assert shell.autodetect() == "plain"
| TestAutoDetection |
python | realpython__materials | wordcount/tests/realpython/readme.py | {
"start": 77,
"end": 582
} | class ____:
def __init__(self, config: Config) -> None:
path = config.rootpath / "README.md"
if path.exists():
self._content = path.read_text(encoding="utf-8")
else:
self._content = ""
self._folder_name = config.rootpath.name
@cached_property
def exercise_name(self) -> str:
if match := re.search(r"^# (.+)", self._content):
return match.group(1).title()
else:
return self._folder_name.title()
| Readme |
python | google__jax | tests/pallas/tpu_ops_test.py | {
"start": 1964,
"end": 2364
} | class ____(jtu.JaxTestCase):
INTERPRET = False
def setUp(self):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Test only supported on TPU.")
super().setUp()
@classmethod
def pallas_call(cls, *args, **kwargs):
return pl.pallas_call(*args, interpret=cls.INTERPRET, **kwargs)
@jtu.thread_unsafe_test_class(condition=not jtu.hypothesis_is_thread_safe())
| PallasBaseTest |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 535234,
"end": 542347
} | class ____(NumBinopNode):
# '**' operator.
is_cpow = None
type_was_inferred = False # was the result type affected by cpow==False?
# Intended to allow it to be changed if the node is coerced.
def _check_cpow(self, env):
if self.is_cpow is not None:
return # already set
self.is_cpow = env.directives['cpow']
def infer_type(self, env):
self._check_cpow(env)
return super().infer_type(env)
def analyse_types(self, env):
self._check_cpow(env)
return super().analyse_types(env)
def infer_builtin_types_operation(self, type1, type2):
# TODO
return None
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = self.type.binary_op('**')
else:
error(self.pos, "complex int powers not supported")
self.pow_func = "<error>"
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
env.use_utility_code(
UtilityCode.load_cached("IntPow", "CMath.c").specialize(
func_name=self.pow_func,
type=self.type.empty_declaration_code(),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def compute_c_result_type(self, type1, type2):
from numbers import Real
c_result_type = None
op1_is_definitely_positive = (
isinstance(self.operand1.constant_result, Real)
and self.operand1.constant_result >= 0
) or (
type1.is_int and type1.signed == 0 # definitely unsigned
)
type2_is_int = type2.is_int or (
self.operand2.has_constant_result() and
isinstance(self.operand2.constant_result, Real) and
int(self.operand2.constant_result) == self.operand2.constant_result
)
needs_widening = False
if self.is_cpow:
c_result_type = super().compute_c_result_type(type1, type2)
if not self.operand2.has_constant_result():
needs_widening = (
isinstance(self.operand2.constant_result, int) and self.operand2.constant_result < 0
)
elif op1_is_definitely_positive or type2_is_int: # cpow==False
# if type2 is an integer then we can't end up going from real to complex
c_result_type = super().compute_c_result_type(type1, type2)
if not self.operand2.has_constant_result():
needs_widening = type2.is_int and type2.signed
if needs_widening:
self.type_was_inferred = True
else:
needs_widening = (
isinstance(self.operand2.constant_result, int) and self.operand2.constant_result < 0
)
elif self.c_types_okay(type1, type2):
# Allowable result types are double or complex double.
# Return the special "soft complex" type to store it as a
# complex number but with specialized coercions to Python
c_result_type = PyrexTypes.soft_complex_type
self.type_was_inferred = True
if needs_widening:
c_result_type = PyrexTypes.widest_numeric_type(c_result_type, PyrexTypes.c_double_type)
return c_result_type
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
def py_operation_function(self, code):
if (self.type.is_pyobject and
self.operand1.constant_result == 2 and
isinstance(self.operand1.constant_result, int) and
self.operand2.type is py_object_type):
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
if self.inplace:
return '__Pyx_PyNumber_InPlacePowerOf2'
else:
return '__Pyx_PyNumber_PowerOf2'
return super().py_operation_function(code)
def coerce_to(self, dst_type, env):
if dst_type == self.type:
return self
if (self.is_cpow is None and self.type_was_inferred and
(dst_type.is_float or dst_type.is_int)):
# if we're trying to coerce this directly to a C float or int
# then fall back to the cpow == True behaviour since this is
# almost certainly the user intent.
# However, ensure that the operand types are suitable C types
if self.type is PyrexTypes.soft_complex_type:
def check_types(operand, recurse=True):
if operand.type.is_float or operand.type.is_int:
return True, operand
if recurse and isinstance(operand, CoerceToComplexNode):
return check_types(operand.arg, recurse=False), operand.arg
return False, None
msg_detail = "a non-complex C numeric type"
elif dst_type.is_int:
def check_types(operand):
if operand.type.is_int:
return True, operand
else:
# int, int doesn't seem to involve coercion nodes
return False, None
msg_detail = "an integer C numeric type"
else:
def check_types(operand):
return False, None
check_op1, op1 = check_types(self.operand1)
check_op2, op2 = check_types(self.operand2)
if check_op1 and check_op2:
warning(self.pos, "Treating '**' as if 'cython.cpow(True)' since it "
"is directly assigned to a %s. "
"This is likely to be fragile and we recommend setting "
"'cython.cpow' explicitly." % msg_detail)
self.is_cpow = True
self.operand1 = op1
self.operand2 = op2
result = self.analyse_types(env)
if result.type != dst_type:
result = result.coerce_to(dst_type, env)
return result
return super().coerce_to(dst_type, env)
| PowNode |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-good-partitions.py | {
"start": 57,
"end": 486
} | class ____(object):
def numberOfGoodPartitions(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
lookup = {x:i for i, x in enumerate(nums)}
result = 1
right = cnt = 0
for left, x in enumerate(nums):
if left == right+1:
cnt += 1
right = max(right, lookup[x])
return pow(2, cnt, MOD)
| Solution |
python | giampaolo__psutil | tests/test_testutils.py | {
"start": 3729,
"end": 5011
} | class ____(PsutilTestCase):
def test_wait_for_pid(self):
wait_for_pid(os.getpid())
nopid = max(psutil.pids()) + 99999
with mock.patch('tests.retry.__iter__', return_value=iter([0])):
with pytest.raises(psutil.NoSuchProcess):
wait_for_pid(nopid)
def test_wait_for_file(self):
testfn = self.get_testfn()
with open(testfn, 'w') as f:
f.write('foo')
wait_for_file(testfn)
assert not os.path.exists(testfn)
def test_wait_for_file_empty(self):
testfn = self.get_testfn()
with open(testfn, 'w'):
pass
wait_for_file(testfn, empty=True)
assert not os.path.exists(testfn)
def test_wait_for_file_no_file(self):
testfn = self.get_testfn()
with mock.patch('tests.retry.__iter__', return_value=iter([0])):
with pytest.raises(OSError):
wait_for_file(testfn)
def test_wait_for_file_no_delete(self):
testfn = self.get_testfn()
with open(testfn, 'w') as f:
f.write('foo')
wait_for_file(testfn, delete=False)
assert os.path.exists(testfn)
def test_call_until(self):
call_until(lambda: 1)
# TODO: test for timeout
| TestSyncTestUtils |
python | pytorch__pytorch | torch/_numpy/_ndarray.py | {
"start": 8071,
"end": 8339
} | class ____:
pass
_Unspecified.unspecified = _Unspecified()
###############################################################
# ndarray class #
###############################################################
| _Unspecified |
python | RaRe-Technologies__gensim | gensim/models/fasttext.py | {
"start": 29013,
"end": 29172
} | class ____(utils.SaveLoad):
"""This is a redundant class. It exists only to maintain backwards compatibility
with older gensim versions."""
| FastTextVocab |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/action_model.py | {
"start": 934,
"end": 10749
} | class ____(nn.Module):
def __init__(
self,
hidden_size: int,
action_spec: ActionSpec,
conditional_sigma: bool = False,
tanh_squash: bool = False,
deterministic: bool = False,
):
"""
A torch module that represents the action space of a policy. The ActionModel may contain
a continuous distribution, a discrete distribution or both where construction depends on
the action_spec. The ActionModel uses the encoded input of the network body to parameterize
these distributions. The forward method of this module outputs the action, log probs,
and entropies given the encoding from the network body.
:params hidden_size: Size of the input to the ActionModel.
:params action_spec: The ActionSpec defining the action space dimensions and distributions.
:params conditional_sigma: Whether or not the std of a Gaussian is conditioned on state.
:params tanh_squash: Whether to squash the output of a Gaussian with the tanh function.
:params deterministic: Whether to select actions deterministically in policy.
"""
super().__init__()
self.encoding_size = hidden_size
self.action_spec = action_spec
self._continuous_distribution = None
self._discrete_distribution = None
if self.action_spec.continuous_size > 0:
self._continuous_distribution = GaussianDistribution(
self.encoding_size,
self.action_spec.continuous_size,
conditional_sigma=conditional_sigma,
tanh_squash=tanh_squash,
)
if self.action_spec.discrete_size > 0:
self._discrete_distribution = MultiCategoricalDistribution(
self.encoding_size, self.action_spec.discrete_branches
)
# During training, clipping is done in TorchPolicy, but we need to clip before ONNX
# export as well.
self.clip_action = not tanh_squash
self._deterministic = deterministic
def _sample_action(self, dists: DistInstances) -> AgentAction:
"""
Samples actions from a DistInstances tuple
:params dists: The DistInstances tuple
:return: An AgentAction corresponding to the actions sampled from the DistInstances
"""
continuous_action: Optional[torch.Tensor] = None
discrete_action: Optional[List[torch.Tensor]] = None
# This checks None because mypy complains otherwise
if dists.continuous is not None:
if self._deterministic:
continuous_action = dists.continuous.deterministic_sample()
else:
continuous_action = dists.continuous.sample()
if dists.discrete is not None:
discrete_action = []
if self._deterministic:
for discrete_dist in dists.discrete:
discrete_action.append(discrete_dist.deterministic_sample())
else:
for discrete_dist in dists.discrete:
discrete_action.append(discrete_dist.sample())
return AgentAction(continuous_action, discrete_action)
def _get_dists(self, inputs: torch.Tensor, masks: torch.Tensor) -> DistInstances:
"""
Creates a DistInstances tuple using the continuous and discrete distributions
:params inputs: The encoding from the network body
:params masks: Action masks for discrete actions
:return: A DistInstances tuple
"""
continuous_dist: Optional[DistInstance] = None
discrete_dist: Optional[List[DiscreteDistInstance]] = None
# This checks None because mypy complains otherwise
if self._continuous_distribution is not None:
continuous_dist = self._continuous_distribution(inputs)
if self._discrete_distribution is not None:
discrete_dist = self._discrete_distribution(inputs, masks)
return DistInstances(continuous_dist, discrete_dist)
def _get_probs_and_entropy(
self, actions: AgentAction, dists: DistInstances
) -> Tuple[ActionLogProbs, torch.Tensor]:
"""
Computes the log probabilites of the actions given distributions and entropies of
the given distributions.
:params actions: The AgentAction
:params dists: The DistInstances tuple
:return: An ActionLogProbs tuple and a torch tensor of the distribution entropies.
"""
entropies_list: List[torch.Tensor] = []
continuous_log_prob: Optional[torch.Tensor] = None
discrete_log_probs: Optional[List[torch.Tensor]] = None
all_discrete_log_probs: Optional[List[torch.Tensor]] = None
# This checks None because mypy complains otherwise
if dists.continuous is not None:
continuous_log_prob = dists.continuous.log_prob(actions.continuous_tensor)
entropies_list.append(dists.continuous.entropy())
if dists.discrete is not None:
discrete_log_probs = []
all_discrete_log_probs = []
for discrete_action, discrete_dist in zip(
actions.discrete_list, dists.discrete # type: ignore
):
discrete_log_prob = discrete_dist.log_prob(discrete_action)
entropies_list.append(discrete_dist.entropy())
discrete_log_probs.append(discrete_log_prob)
all_discrete_log_probs.append(discrete_dist.all_log_prob())
action_log_probs = ActionLogProbs(
continuous_log_prob, discrete_log_probs, all_discrete_log_probs
)
entropies = torch.cat(entropies_list, dim=1)
return action_log_probs, entropies
def evaluate(
self, inputs: torch.Tensor, masks: torch.Tensor, actions: AgentAction
) -> Tuple[ActionLogProbs, torch.Tensor]:
"""
Given actions and encoding from the network body, gets the distributions and
computes the log probabilites and entropies.
:params inputs: The encoding from the network body
:params masks: Action masks for discrete actions
:params actions: The AgentAction
:return: An ActionLogProbs tuple and a torch tensor of the distribution entropies.
"""
dists = self._get_dists(inputs, masks)
log_probs, entropies = self._get_probs_and_entropy(actions, dists)
# Use the sum of entropy across actions, not the mean
entropy_sum = torch.sum(entropies, dim=1)
return log_probs, entropy_sum
def get_action_out(self, inputs: torch.Tensor, masks: torch.Tensor) -> torch.Tensor:
"""
Gets the tensors corresponding to the output of the policy network to be used for
inference. Called by the Actor's forward call.
:params inputs: The encoding from the network body
:params masks: Action masks for discrete actions
:return: A tuple of torch tensors corresponding to the inference output
"""
dists = self._get_dists(inputs, masks)
continuous_out, discrete_out, action_out_deprecated = None, None, None
deterministic_continuous_out, deterministic_discrete_out = (
None,
None,
) # deterministic actions
if self.action_spec.continuous_size > 0 and dists.continuous is not None:
continuous_out = dists.continuous.exported_model_output()
action_out_deprecated = continuous_out
deterministic_continuous_out = dists.continuous.deterministic_sample()
if self.clip_action:
continuous_out = torch.clamp(continuous_out, -3, 3) / 3
action_out_deprecated = continuous_out
deterministic_continuous_out = (
torch.clamp(deterministic_continuous_out, -3, 3) / 3
)
if self.action_spec.discrete_size > 0 and dists.discrete is not None:
discrete_out_list = [
discrete_dist.exported_model_output()
for discrete_dist in dists.discrete
]
discrete_out = torch.cat(discrete_out_list, dim=1)
action_out_deprecated = torch.cat(discrete_out_list, dim=1)
deterministic_discrete_out_list = [
discrete_dist.deterministic_sample() for discrete_dist in dists.discrete
]
deterministic_discrete_out = torch.cat(
deterministic_discrete_out_list, dim=1
)
# deprecated action field does not support hybrid action
if self.action_spec.continuous_size > 0 and self.action_spec.discrete_size > 0:
action_out_deprecated = None
return (
continuous_out,
discrete_out,
action_out_deprecated,
deterministic_continuous_out,
deterministic_discrete_out,
)
def forward(
self, inputs: torch.Tensor, masks: torch.Tensor
) -> Tuple[AgentAction, ActionLogProbs, torch.Tensor]:
"""
The forward method of this module. Outputs the action, log probs,
and entropies given the encoding from the network body.
:params inputs: The encoding from the network body
:params masks: Action masks for discrete actions
:return: Given the input, an AgentAction of the actions generated by the policy and the corresponding
ActionLogProbs and entropies.
"""
dists = self._get_dists(inputs, masks)
actions = self._sample_action(dists)
log_probs, entropies = self._get_probs_and_entropy(actions, dists)
# Use the sum of entropy across actions, not the mean
entropy_sum = torch.sum(entropies, dim=1)
return (actions, log_probs, entropy_sum)
| ActionModel |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/call_graph.py | {
"start": 2429,
"end": 3454
} | class ____:
class A:
def __init__(self, x): pass
def __call__(self): pass
class B:
def __init__(self, x): pass
def __call__(self): pass
class C:
def __init__(self, x): pass
def __call__(self): pass
def test_match_type_of(x: Union[namespace.A, namespace.B, namespace.C]):
# The `match` is currently translated into:
# ```
# if type(x) == A: ..
# elif type(x) == B: ...
# elif type(x) == C: ...
# ```
# After each implicit `else`, we learn that the type is not A, or B, or C
# Hence each time we compute the call graph for `type(x)`, we add different
# higher order parameters (first one has all A,B,C, second one has B,C, etc.)
# This leads to having different callees for the same original expression,
# breaking a call graph invariant.
match type(x):
case namespace.A:
_test_sink(x)
case namespace.B:
_test_sink(x)
case namespace.C:
_test_sink(x)
| namespace |
python | django__django | tests/fixtures_regress/models.py | {
"start": 5106,
"end": 5311
} | class ____(models.Model):
name = models.CharField(max_length=255, unique=True)
def natural_key(self):
return (self.name,)
natural_key.dependencies = ["fixtures_regress.circle3"]
| Circle3 |
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 36610,
"end": 46931
class ____(ProjectWithTeamSerializer):
    """Serializer for the project detail endpoint.
    Extends the team-aware base payload with per-project options, the
    latest release version, the serialized organization, symbol sources,
    sampling state, and plugin configuration.
    """
    def get_attrs(
        self, item_list: Sequence[Project], user: User | RpcUser | AnonymousUser, **kwargs: Any
    ) -> dict[Project, dict[str, Any]]:
        """Batch-load options, serialized orgs, and latest release versions
        for every project in ``item_list`` and merge them into the base attrs.
        """
        attrs = super().get_attrs(item_list, user)
        # One query for all projects' options, bucketed by project id.
        queryset = ProjectOption.objects.filter(project__in=item_list, key__in=OPTION_KEYS)
        options_by_project: dict[int, dict[str, Any]] = defaultdict(dict)
        for option in queryset.iterator():
            options_by_project[option.project_id][option.key] = option.value
        orgs = {d["id"]: d for d in serialize(list({i.organization for i in item_list}), user)}
        # Only fetch the latest release version key for each project to cut down on response size
        latest_release_versions = _get_project_to_release_version_mapping(item_list)
        for item in item_list:
            attrs[item].update(
                {
                    "latest_release": latest_release_versions.get(item.id),
                    "org": orgs[str(item.organization_id)],
                    "options": options_by_project[item.id],
                    "processing_issues": 0,
                    "highlight_preset": get_highlight_preset_for_project(item),
                }
            )
        return attrs
    def serialize(
        self,
        obj: Project,
        attrs: Mapping[str, Any],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> DetailedProjectResponse:
        """Build the full detailed-project response payload for ``obj``."""
        from sentry.plugins.base import plugins
        base = super().serialize(obj, attrs, user)
        custom_symbol_sources_json = attrs["options"].get("sentry:symbol_sources")
        try:
            sources = parse_sources(custom_symbol_sources_json, filter_appconnect=False)
        except Exception:
            # In theory sources stored on the project should be valid. If they are invalid, we don't
            # want to abort serialization just for sources, so just return an empty list instead of
            # returning sources with their secrets included.
            serialized_sources = "[]"
        else:
            redacted_sources = redact_source_secrets(sources)
            serialized_sources = orjson.dumps(redacted_sources, option=orjson.OPT_UTC_Z).decode()
        # Resolve the effective sample rate: project-level target, org-level
        # target, or the blended rate, depending on which features are enabled.
        sample_rate = None
        if has_custom_dynamic_sampling(obj.organization):
            if is_project_mode_sampling(obj.organization):
                sample_rate = obj.get_option("sentry:target_sample_rate")
            else:
                sample_rate = obj.organization.get_option(
                    "sentry:target_sample_rate", TARGET_SAMPLE_RATE_DEFAULT
                )
        elif has_dynamic_sampling(obj.organization):
            sample_rate = quotas.backend.get_blended_sample_rate(
                organization_id=obj.organization.id
            )
        data: DetailedProjectResponse = {
            **base,
            "latestRelease": attrs["latest_release"],
            "options": self.format_options(attrs),
            "digestsMinDelay": attrs["options"].get(
                "digests:mail:minimum_delay", digests.minimum_delay
            ),
            "digestsMaxDelay": attrs["options"].get(
                "digests:mail:maximum_delay", digests.maximum_delay
            ),
            "subjectPrefix": attrs["options"].get(
                "mail:subject_prefix", options.get("mail.subject-prefix")
            ),
            "allowedDomains": attrs["options"].get("sentry:origins", ["*"]),
            "resolveAge": int(attrs["options"].get("sentry:resolve_age", 0)),
            "dataScrubber": bool(attrs["options"].get("sentry:scrub_data", True)),
            "dataScrubberDefaults": bool(attrs["options"].get("sentry:scrub_defaults", True)),
            "safeFields": attrs["options"].get("sentry:safe_fields", []),
            "storeCrashReports": convert_crashreport_count(
                attrs["options"].get("sentry:store_crash_reports"), allow_none=True
            ),
            "sensitiveFields": attrs["options"].get("sentry:sensitive_fields", []),
            "subjectTemplate": attrs["options"].get("mail:subject_template")
            or DEFAULT_SUBJECT_TEMPLATE.template,
            "securityToken": attrs["options"].get("sentry:token") or obj.get_security_token(),
            "securityTokenHeader": attrs["options"].get("sentry:token_header"),
            "verifySSL": bool(attrs["options"].get("sentry:verify_ssl", False)),
            "scrubIPAddresses": bool(attrs["options"].get("sentry:scrub_ip_address", False)),
            "scrapeJavaScript": bool(attrs["options"].get("sentry:scrape_javascript", True)),
            "highlightTags": attrs["options"].get(
                "sentry:highlight_tags",
                attrs["highlight_preset"].get("tags", []),
            ),
            "highlightContext": attrs["options"].get(
                "sentry:highlight_context",
                attrs["highlight_preset"].get("context", {}),
            ),
            "highlightPreset": attrs["highlight_preset"],
            "groupingConfig": self.get_value_with_default(attrs, "sentry:grouping_config"),
            "groupingEnhancements": self.get_value_with_default(
                attrs, "sentry:grouping_enhancements"
            ),
            "derivedGroupingEnhancements": self.get_value_with_default(
                attrs, "sentry:derived_grouping_enhancements"
            ),
            "secondaryGroupingExpiry": self.get_value_with_default(
                attrs, "sentry:secondary_grouping_expiry"
            ),
            "secondaryGroupingConfig": self.get_value_with_default(
                attrs, "sentry:secondary_grouping_config"
            ),
            "fingerprintingRules": self.get_value_with_default(
                attrs, "sentry:fingerprinting_rules"
            ),
            "organization": attrs["org"],
            "plugins": serialize(
                [
                    plugin
                    for plugin in plugins.configurable_for_project(obj, version=None)
                    if plugin.has_project_conf()
                ],
                user,
                PluginSerializer(obj),
            ),
            "platforms": attrs["platforms"],
            "processingIssues": attrs["processing_issues"],
            "defaultEnvironment": attrs["options"].get("sentry:default_environment"),
            "relayPiiConfig": attrs["options"].get("sentry:relay_pii_config"),
            "builtinSymbolSources": self.get_value_with_default(
                attrs, "sentry:builtin_symbol_sources"
            ),
            "dynamicSamplingBiases": self.get_value_with_default(
                attrs, "sentry:dynamic_sampling_biases"
            ),
            "symbolSources": serialized_sources,
            "isDynamicallySampled": sample_rate is not None and sample_rate < 1.0,
            "autofixAutomationTuning": self.get_value_with_default(
                attrs, "sentry:autofix_automation_tuning"
            ),
            "seerScannerAutomation": self.get_value_with_default(
                attrs, "sentry:seer_scanner_automation"
            ),
            "debugFilesRole": attrs["options"].get("sentry:debug_files_role"),
        }
        if has_tempest_access(obj.organization):
            data["tempestFetchScreenshots"] = attrs["options"].get(
                "sentry:tempest_fetch_screenshots", False
            )
        return data
    def format_options(self, attrs: Mapping[str, Any]) -> dict[str, Any]:
        """Normalize stored project options into the client-facing shapes
        (list options joined with newlines, string flags coerced to bools).
        """
        options = attrs["options"]
        return {
            "sentry:csp_ignored_sources_defaults": bool(
                options.get("sentry:csp_ignored_sources_defaults", True)
            ),
            "sentry:csp_ignored_sources": "\n".join(
                options.get("sentry:csp_ignored_sources", []) or []
            ),
            "filters:blacklisted_ips": "\n".join(options.get("sentry:blacklisted_ips", [])),
            # This option was defaulted to string but was changed at runtime to a boolean due to an error in the
            # implementation. In order to bring it back to a string, we need to repair on read stored options. This is
            # why the value true is determined by either "1" or True.
            "filters:react-hydration-errors": options.get("filters:react-hydration-errors", "1")
            in ("1", True),
            "filters:chunk-load-error": options.get("filters:chunk-load-error", "1") == "1",
            f"filters:{FilterTypes.RELEASES}": "\n".join(
                options.get(f"sentry:{FilterTypes.RELEASES}", [])
            ),
            f"filters:{FilterTypes.ERROR_MESSAGES}": "\n".join(
                options.get(f"sentry:{FilterTypes.ERROR_MESSAGES}", [])
            ),
            f"filters:{FilterTypes.LOG_MESSAGES}": "\n".join(
                options.get(f"sentry:{FilterTypes.LOG_MESSAGES}", [])
            ),
            f"filters:{FilterTypes.TRACE_METRIC_NAMES}": "\n".join(
                options.get(f"sentry:{FilterTypes.TRACE_METRIC_NAMES}", [])
            ),
            "feedback:branding": options.get("feedback:branding", "1") == "1",
            "sentry:feedback_user_report_notifications": bool(
                self.get_value_with_default(attrs, "sentry:feedback_user_report_notifications")
            ),
            "sentry:feedback_ai_spam_detection": bool(
                self.get_value_with_default(attrs, "sentry:feedback_ai_spam_detection")
            ),
            "sentry:replay_rage_click_issues": self.get_value_with_default(
                attrs, "sentry:replay_rage_click_issues"
            ),
            "sentry:replay_hydration_error_issues": self.get_value_with_default(
                attrs, "sentry:replay_hydration_error_issues"
            ),
            "sentry:toolbar_allowed_origins": "\n".join(
                self.get_value_with_default(attrs, "sentry:toolbar_allowed_origins") or []
            ),
            "quotas:spike-protection-disabled": options.get("quotas:spike-protection-disabled"),
        }
    def get_value_with_default(self, attrs, key):
        """Return the stored option value for ``key``, falling back to the
        well-known default for the project's option epoch when unset.
        """
        value = attrs["options"].get(key)
        if value is not None:
            return value
        return projectoptions.get_well_known_default(
            key, epoch=attrs["options"].get("sentry:option-epoch")
        )
| DetailedProjectSerializer |
python | Pylons__pyramid | src/pyramid/config/assets.py | {
"start": 300,
"end": 3165
class ____(pkg_resources.DefaultProvider):
    """Resource provider that consults registry-configured package overrides
    before deferring to the default ``pkg_resources`` lookup for the module.
    """
    def __init__(self, module):
        pkg_resources.DefaultProvider.__init__(self, module)
        self.module_name = module.__name__
    def _get_overrides(self):
        """Return the ``IPackageOverrides`` utility registered for this
        module's name, or ``None`` when no overrides are configured."""
        registry = get_current_registry()
        return registry.queryUtility(IPackageOverrides, self.module_name)
    def get_resource_filename(self, manager, resource_name):
        """Return a true filesystem path for resource_name,
        co-ordinating the extraction with manager, if the resource
        must be unpacked to the filesystem.
        """
        overrides = self._get_overrides()
        filename = (
            overrides.get_filename(resource_name) if overrides is not None else None
        )
        if filename is not None:
            return filename
        return pkg_resources.DefaultProvider.get_resource_filename(
            self, manager, resource_name
        )
    def get_resource_stream(self, manager, resource_name):
        """Return a readable file-like object for resource_name."""
        overrides = self._get_overrides()
        stream = (
            overrides.get_stream(resource_name) if overrides is not None else None
        )
        if stream is not None:
            return stream
        return pkg_resources.DefaultProvider.get_resource_stream(
            self, manager, resource_name
        )
    def get_resource_string(self, manager, resource_name):
        """Return a string containing the contents of resource_name."""
        overrides = self._get_overrides()
        contents = (
            overrides.get_string(resource_name) if overrides is not None else None
        )
        if contents is not None:
            return contents
        return pkg_resources.DefaultProvider.get_resource_string(
            self, manager, resource_name
        )
    def has_resource(self, resource_name):
        """Report whether ``resource_name`` exists, honoring overrides."""
        overrides = self._get_overrides()
        if overrides is not None:
            answer = overrides.has_resource(resource_name)
            if answer is not None:
                return answer
        return pkg_resources.DefaultProvider.has_resource(self, resource_name)
    def resource_isdir(self, resource_name):
        """Report whether ``resource_name`` is a directory, honoring overrides."""
        overrides = self._get_overrides()
        if overrides is not None:
            answer = overrides.isdir(resource_name)
            if answer is not None:
                return answer
        return pkg_resources.DefaultProvider.resource_isdir(
            self, resource_name
        )
    def resource_listdir(self, resource_name):
        """List the contents of ``resource_name``, honoring overrides."""
        overrides = self._get_overrides()
        if overrides is not None:
            entries = overrides.listdir(resource_name)
            if entries is not None:
                return entries
        return pkg_resources.DefaultProvider.resource_listdir(
            self, resource_name
        )
@implementer(IPackageOverrides)
| OverrideProvider |
python | pytorch__pytorch | torch/_inductor/cudagraph_trees.py | {
"start": 28174,
"end": 73645
class ____:
    """
    A single recording of a function into a CUDA Graph. Recordings of CUDA Graphs share a single memory pool
    and are structured into a tree, where there is a single recording that can precede it (parent) and multiple
    subsequent recordings that may follow (children). A node will have no parent if it is the first recording
    in a tree; i.e., when it is first recorded, there are no live tensors from a previous recording which
    would force a dependency.
    On first recording, all of the live tensors in the current CUDA Graph Node path will be
    reflected in the corresponding private pool. On subsequent executions, the caching allocator
    is unaffected when the graph is replayed.
    In order to support recording a subsequent cuda graph recording after execution of this graph,
    we checkpoint the state of the memory pool so that it may later be resumed.
    WrappedFunction should have already been warmed up prior to invocation.
    See [setCheckpointPoolState] for further explanation, as well as
    https://user-images.githubusercontent.com/13564/222815509-374f3400-f83d-4f7d-8fa6-4a092b3250bb.png
    """
    def __init__(
        self,
        wrapped_function: WrappedFunction,
        id: GraphID,
        parent: Optional[CUDAGraphNode],
        inputs: list[InputType],
        cuda_graphs_pool: _POOL_HANDLE,
        device_index: int,
        stack_traces: Optional[StackTraces],
        stream: torch.cuda.Stream,
        mode: Optional[CompilationMode],
        compile_id: Optional[CompileId],
    ) -> None:
        """Record ``wrapped_function`` into a new CUDA graph.
        Args:
            wrapped_function: the warmed-up model plus static-input metadata.
            id: unique id for this graph node.
            parent: preceding recording in the tree, or ``None`` for a root.
            inputs: concrete inputs; consumed (cleared) during recording.
            cuda_graphs_pool: memory pool shared by the whole tree.
            device_index: CUDA device to record on.
            stack_traces: optional per-output stack traces for error reporting.
            stream: stream used for graph capture.
            mode: compilation mode, forwarded to the dynamo timing event.
            compile_id: compile id, forwarded to the dynamo timing event.
        """
        assert isinstance(inputs, (list, tuple))
        self.wrapped_function = wrapped_function
        self.id = id
        self.device = device_index
        self.stack_traces = stack_traces
        self.stream = stream
        # Enable re-record a cudagraph when static tensor address changed.
        # if not we should error when it changed.
        self.rerecord_if_static_inputs_change = (
            torch._dynamo.config.inline_inbuilt_nn_modules
            or torch._inductor.config.triton.cudagraph_support_input_mutation
        )
        # if this is a root parent will be None. use weakref to prevent reference cycle
        self._parent = weakref.ref(parent) if parent is not None else None
        # reference to the shared memory pool for the entire cuda graphs tree
        self.cuda_graphs_pool = cuda_graphs_pool
        # A single wrapped function may be recorded multiple times if memory patterns or
        # invariants change from one execution to the next
        self.children: dict[FunctionID, list[CUDAGraphNode]] = defaultdict(list)
        # StorageWeakRef maintains whether the Storage C++ object remains allocated,
        # not whether the corresponding memory has been deallocated. In order
        # to use them to track memory deallocations we must maintain a single StorageWeakRef
        # for all Storages that reference that memory (even if we are constructing Storages
        # that do not have a deallocator function). We maintain one single storage_cache
        # as we execute any tree path. When we retrieve a storage from the cache we
        # check that it is still alive, and we hash based on observed recording data ptr
        # and storage cdata.
        # we preserve a single reference to executed outputs that is then referenced
        # in children to avoid children having to chase parent pointers in the hot path
        # DO NOT reassign output_weakrefs, only call `clear()`
        # Path is a series of nodes from root to the current node
        self.outputs_weakrefs: OutputList[Optional[StorageWeakRefWrapper]] = []
        self.path_weakrefs: LevelList[OutputList[Optional[StorageWeakRefWrapper]]] = [
            node.outputs_weakrefs for node in self._path_from_root
        ]
        self.path_stacktraces: LevelList[Optional[StackTraces]] = [
            node.stack_traces for node in self._path_from_root
        ]
        self.tensor_weakrefs: OutputList[Optional[TensorWeakRef]] = []
        # tensors which are outputs of previous graphs in the tree
        self.cudagraph_managed_idxs: list[int] = [
            idx
            for idx, t in enumerate(inputs)
            if isinstance(t, torch.Tensor) and self._is_cuda_graph_recorded_tensor(t)
        ]
        # (depth, offset) of live tensors which are alias of previous graph outputs
        self.live_cudagraph_managed_path_refs: InputList[Optional[PathOutputIndex]] = [
            (
                self._is_alias_of_live_recorded_tensor(t)
                if isinstance(t, torch.Tensor)
                else None
            )
            for t in inputs
        ]
        # when replay, preserve the liveness of an input if it AliasesPriorGraphOutput
        # and also aliases an output of the current CUDAGraphNode
        self.preserved_aliased_inputs: InputList[bool] = [False] * len(inputs)
        self.static_input_idxs: list[int] = list(
            OrderedSet(wrapped_function.static_input_idxs)
            | OrderedSet(self.cudagraph_managed_idxs)
        )
        self.non_static_input_idx: LevelList[int] = [
            i for i in range(len(inputs)) if i not in self.static_input_idxs
        ]
        counters["inductor"]["cudagraph_recorded_non_static_inputs"] += len(
            self.non_static_input_idx
        )
        self.non_managed_static_input_idxs: LevelList[int] = [
            i
            for i in wrapped_function.static_input_idxs
            if i not in self.cudagraph_managed_idxs
        ]
        def maybe_get_static_data_ptr(
            idx: int,
            inputs: list[InputType],
            static_input_idxs: list[int],
        ) -> Optional[int]:
            # Only static tensor inputs have a stable data pointer to record.
            inp = inputs[idx]
            if isinstance(inp, torch.Tensor) and idx in static_input_idxs:
                return inp.data_ptr()
            return None
        self.static_input_data_ptrs: InputList[Optional[int]] = [
            # pyrefly: ignore [bad-argument-type]
            maybe_get_static_data_ptr(i, inputs, self.static_input_idxs)
            for i in range(len(inputs))
        ]
        # When we checkpoint, and free generations, we will be manually freeing the outputs
        # of CUDAGraphNodes. We should not be freeing parameters, not do we need to account for
        # their liveness (they are static), so we need to compute which outputs are aliases of
        # parameters. Some static inputs are saved tensors from the forward that die in the backward.
        # Their locations are static but lifetimes are not. We only include the persistent static
        # data ptrs below because the non persistent data ptrs may be outputs of this record and
        # fresh allocations.
        # precompute expanded dims to avoid computing in the hot path
        self.expanded_dims: list[list[int]] = [
            get_expanded_dims(x)
            if isinstance(x, torch.Tensor) and idx not in self.static_input_idxs
            else []
            for idx, x in enumerate(inputs)
        ]
        # For each node in path, which outputs were observed to be live
        # before invoking graph recording, and after graph recording
        self.recorded_liveness_before_graph: LevelList[OutputList[bool]] = []
        self.recorded_liveness_after_graph: LevelList[OutputList[bool]] = []
        # List of tuples of (depth, output_index) that index into node at depth
        # number of nodes from root and output_index of outputs. Will index into
        # path_weakrefs.
        self.expected_dead_indices_before_graph: list[PathOutputIndex] = []
        self.expected_dead_indices_after_graph: list[PathOutputIndex] = []
        # all live indices after graph recording
        self.live_indices_after_graph: list[PathOutputIndex] = []
        if self.parent is not None:
            previous_liveness = self.parent.recorded_liveness_after_graph
            curr_liveness = self._get_liveness(self.path_weakrefs)
            different_indices = self._get_different_indices(
                previous_liveness, curr_liveness
            )
            self.recorded_liveness_before_graph = curr_liveness
            self.expected_dead_indices_before_graph = different_indices
        # collect torch.Generator inputs; they are registered with the graph below
        rng_states = [inp for inp in inputs if isinstance(inp, torch.Generator)]
        # pyrefly: ignore [bad-argument-type]
        recording_inputs = self._allocate_and_copy_recording_inputs(inputs)
        # recording inputs will copy over memory, so we can free non recording inputs
        # pyrefly: ignore [missing-attribute]
        inputs.clear()
        del inputs
        # graph used for recording model invocation
        self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
        # TODO: register_generator_state should potentially take explicit device
        with torch.cuda.device(self.device):
            for rng_state in rng_states:
                self.graph.register_generator_state(rng_state)
        # we allocate non-static inputs within the same memory pool as the CUDAGraph
        # which we will record the model with. For memory efficiency, it is important
        # to reclaim the input memory when the inputs are no longer live. To accomplish this,
        # we reconstruct tensors at the correct data pointers of our inputs which are
        # non owning and do not prevent deallocation. On subsequent executions, input values
        # will be copied over to these tensors.
        self.reconstructed_inputs: list[InputType] = [
            self._reconstruct_from_tensor_metadata(self._tensor_metadata(x))
            if isinstance(x, torch.Tensor)
            else x
            for x in recording_inputs
        ]
        # DO THE RECORDING!!!
        # We record the CUDA graph in the constructor of CUDAGraphNode, which
        # gives you what the CPU side compute of the function would do. We
        # don't throw the recording outputs away: their memory is
        # correctly accounted for in the CUDAGraphs caching allocator. This
        # means on the very FIRST run of the CUDA graph node, we can directly
        # do more recording, because we have a valid caching allocator state.
        # NB: This relies on run() being called immediately after the
        # constructor, otherwise this optimization would not be valid.
        # initialized below in _record
        self.checkpointed_caching_state: Optional[AllocatorState] = None
        # Output Storage Alias information, can be:
        # - A new, unaliased storage, or the output is None
        # - An alias of an output of a prior graph
        # - An alias of an output already created in the reconstructed outputs
        # This is None if the output in question is an int
        self.output_storage_alias: OutputList[Optional[OutputAliasInfo]] = []
        # is the output Storage unaliased in subsequent outputs, of all subsequent paths
        # if it is, we cached the output tensor and adjust storage liveness tracking to also
        # check if the output tensor does not have an additional python reference.
        # If a descendent node discovers it has an alias of a prior output, then the output
        # will no longer be cached in the ancestor.
        # The large majority of tensors are unaliased, and preserving aliased output tensors would add
        # significant additional complexity with marginal gains
        # The cached tensor outputs are added on the first execution, and cleared whenever we need
        # to do subsequent recording
        self.unaliased_in_all_paths: OutputList[bool] = []
        self.cached_tensor_outputs: OutputList[Optional[Tensor]] = []
        # if an output aliases a static, persistent input then the corresponding Tensor will
        # be set here. These are different than cached tensors, because they are tensors that
        # are aliases of parameters that are always live.
        self.static_output_tensors: OutputList[Optional[Tensor]] = []
        # Cleared after recording
        with dynamo_timed_cudagraph("CUDAGraphNode.record", compile_id, mode):
            self.recording_outputs: Optional[OutputType] = self._record(
                wrapped_function.model, recording_inputs
            )
        self.outputs_metadata: OutputList[Union[dict[str, Any], int, None]] = []
        # As with inputs, we do not want to keep the outputs permanently alive because that would prevent
        # their memory being reclaimed in subsequent cuda graph recordings. We record the tensor metadata
        # needed to reconstruct instead.
        assert self.recording_outputs is not None
        for out in self.recording_outputs:
            if isinstance(out, torch.Tensor):
                self.outputs_metadata.append(
                    self._tensor_metadata(out, ignore_storage_offset=False)
                )
            else:
                assert isinstance(out, (int, type(None))), type(out)
                self.outputs_metadata.append(out)
        self.graph.replay()
def _copy_inputs_and_remove_from_src(
self, dsts: list[InputType], srcs: list[InputType]
) -> None:
dst_tensors = []
src_tensors = []
for idx in self.non_static_input_idx:
if not isinstance(srcs[idx], torch.Tensor):
continue
expanded_dims = self.expanded_dims[idx]
dst_tensors.append(index_expanded_dims(dsts[idx], expanded_dims)) # type: ignore[arg-type]
src_tensors.append(index_expanded_dims(srcs[idx], expanded_dims)) # type: ignore[arg-type]
srcs[idx] = None # type: ignore[call-overload]
# Fails on empty lists
if dst_tensors:
torch._foreach_copy_(dst_tensors, src_tensors)
    def check_static_inputs_are_stable(self, new_inputs: list[InputType]) -> None:
        """Error if a non-managed static input's data pointer changed since
        recording; a no-op when re-recording on address change is enabled.
        """
        # avoid checking managed tensor static points since we already checked those in check_invariants
        if (
            not self.rerecord_if_static_inputs_change
            and not torch._C._tensors_data_ptrs_at_indices_equal(
                new_inputs,  # type: ignore[arg-type]
                self.static_input_data_ptrs,
                self.non_managed_static_input_idxs,
            )
        ):
            # this should error
            error_msg = log_data_ptr_mismatch(
                self.wrapped_function.placeholders,
                new_inputs,
                self.static_input_data_ptrs,
                self.non_managed_static_input_idxs,
                CheckInvariantStatus.StaticInputIdxMismatch,
            )
            torch._check(False, lambda: error_msg)
def run_first_inputs(self, new_inputs: list[InputType]) -> OutputType:
if config.triton.fast_path_cudagraph_asserts:
self.debug_check_invariants_before_invocation()
# graph is already invoked in the __init__
# inputs are copied over in _allocate_recording_inputs and subsequently cleared
assert len(new_inputs) == 0
outputs = self.recording_outputs
self.recording_outputs = None
assert outputs is not None
return outputs
    def run(self, new_inputs: list[InputType]) -> OutputType:
        """Replay the recorded graph against ``new_inputs``.
        Copies non-static inputs into the recorded input buffers, replays the
        CUDA graph, and reconstructs outputs from saved metadata/alias info.
        ``new_inputs`` is cleared once consumed.
        """
        self.check_static_inputs_are_stable(new_inputs)
        self._copy_inputs_and_remove_from_src(self.reconstructed_inputs, new_inputs)
        self.run_graph()
        outputs = self.reconstruct_outputs()
        new_inputs.clear()
        if config.triton.fast_path_cudagraph_asserts:
            self.debug_check_invariants_after_invocation()
        if config.triton.force_cudagraph_sync:
            torch.cuda.synchronize()
        # Reset this to run the check in the future
        self.static_inputs_stable = False
        return outputs
    def reconstruct_outputs(self) -> OutputType:
        "Reconstruct output tensors according to their saved metadata and alias information"
        # Cached tensors will not yet be set on the first execution
        # They are also cleared in checkpointing, so if we checkpoint this node
        # and then execute it again we will need to repopulate cached tensors
        if not self.cached_tensor_outputs:
            self._initialize_cached_tensors()
        outputs: OutputType = []
        for i, (storage_info, metadata) in enumerate(
            zip(self.output_storage_alias, self.outputs_metadata)
        ):
            if not isinstance(metadata, dict):  # tensor metadata
                assert isinstance(metadata, (int, type(None)))
                # non-tensor outputs (ints / None) are returned unchanged
                outputs.append(metadata)
                continue
            cached_t = self.cached_tensor_outputs[i]
            if cached_t is not None:
                # this output represents a fresh allocated tensor.
                # We return the same TensorImpl from run to run to avoid overhead.
                # autograd.Function will reset the Autograd meta of output tensors
                # as part of aot_autograd, but _backward_hooks are stored on tensors separately,
                # so we need to manually reset hooks.
                if cached_t._backward_hooks is not None:
                    cached_t._backward_hooks = None
                # No need to update weakrefs, already correctly initialized
                outputs.append(cached_t)
                continue
            static_t = self.static_output_tensors[i]
            if static_t is not None:
                assert self.outputs_weakrefs[i] is None
                outputs.append(static_t)
                continue
            storage = self.prepare_alias_info_for_tensor_construction(
                storage_info, metadata
            )
            if isinstance(storage, UntypedStorage) or storage is None:
                out = self._reconstruct_from_tensor_metadata(metadata, storage)
            else:
                # an int here is the index of an earlier output in this batch
                # whose storage this output aliases
                assert isinstance(storage, int)
                out = self._reconstruct_from_tensor_metadata(
                    metadata, cast(torch.Tensor, outputs[storage]).untyped_storage()
                )
            outputs.append(out)
            w = self.outputs_weakrefs[i]
            assert w is not None
            w.swap_weakref(out.untyped_storage()._weak_ref())
        return outputs
    def prepare_alias_info_for_tensor_construction(
        self,
        out_alias_info: Optional[OutputAliasInfo],
        metadata: Union[dict[str, Any], int, None],
    ) -> Union[UntypedStorage, None, int]:
        """Resolve an output's alias info for reconstruction.
        Returns None for non-tensor outputs and unaliased storages, an
        UntypedStorage resurrected from a prior graph output's weakref, or
        the integer index of an earlier output in the current batch whose
        storage this output aliases.
        """
        if (
            isinstance(metadata, (int, type(None)))
            or out_alias_info is UnaliasedStorage
        ):
            return None
        if isinstance(out_alias_info, AliasesPriorGraphOutput):
            depth, existing_output_index = out_alias_info.index
            ref = self.path_weakrefs[depth][existing_output_index]
            assert ref is not None
            return torch.UntypedStorage._new_with_weak_ptr(ref())
        assert isinstance(out_alias_info, AliasesNewOutput)
        return out_alias_info.index
def prepare_storages_for_construction(
self,
) -> list[Union[UntypedStorage, None, int]]:
output_storages = []
for output_storage_alias, metadata in zip(
self.output_storage_alias, self.outputs_metadata
):
output_storages.append(
self.prepare_alias_info_for_tensor_construction(
output_storage_alias, metadata
)
)
return output_storages
def run_graph(self) -> None:
assert self.graph is not None
self.graph.replay()
def all_outputs_are_dead(self) -> bool:
"All outputs of the path from this node to its root are dead"
for depth, output_index in self.live_indices_after_graph:
if is_live(self.path_weakrefs[depth][output_index]):
return False
return True
    def _record(self, model: ModelType, inputs: list[InputType]) -> OutputType:
        """Capture ``model(inputs)`` into ``self.graph`` and return its outputs.
        ``inputs`` is consumed (cleared) by the model during capture.
        """
        assert self.graph is not None
        def static_input_iter() -> Generator[torch.Tensor, None, None]:
            # static tensor inputs not already managed by a prior graph in the tree
            for i in self.wrapped_function.static_input_idxs:
                _inp = inputs[i]
                if isinstance(
                    _inp, torch.Tensor
                ) and not self._is_cuda_graph_recorded_tensor(_inp):
                    yield _inp
        # see: output_is_alias_of_persistent_static_inputs above
        static_input_persistent_storage_ptrs: dict[int, StorageWeakRefWrapper] = {
            inp.untyped_storage().data_ptr(): StorageWeakRefWrapper(inp)
            for inp in itertools.chain(
                static_input_iter(), self.wrapped_function.constants
            )
        }
        if config.triton.slow_path_cudagraph_asserts:
            # need to use parent live weakrefs because live_indices isn't set yet
            memory = (
                [] if self.parent is None else list(self.parent.path_live_weakrefs())
            )
            memory += [
                StorageWeakRefWrapper(elem)
                for i, elem in enumerate(inputs)
                if isinstance(elem, torch.Tensor)
                and i not in self.wrapped_function.static_input_idxs
                and elem.untyped_storage().data_ptr() != 0
            ]
            check_memory_pool(self.device, self.cuda_graphs_pool, memory)
        with (
            preserve_rng_state(),
            torch.cuda.device(self.device),
            clear_cublas_manager(),
            torch.cuda.graph(
                self.graph,
                stream=self.stream,
                pool=self.cuda_graphs_pool,
                capture_error_mode="thread_local",
            ),
            get_history_recording(),
        ):
            static_outputs = model(inputs)
        # running model should reclaim memory
        assert len(inputs) == 0
        if not isinstance(static_outputs, (list, tuple)):
            static_outputs = (static_outputs,)
        # pyrefly: ignore [bad-argument-type]
        self._add_first_outputs(static_outputs, static_input_persistent_storage_ptrs)
        # pyrefly: ignore [bad-return]
        return static_outputs
    def _add_first_outputs(
        self,
        outputs: OutputType,
        static_input_persistent_storage_ptrs: dict[int, StorageWeakRefWrapper],
    ) -> None:
        "Add the outputs from the first invocation of the node and set up metadata"
        # getting liveness before we have added the outputs to path, so the length
        # of the two lists is equal
        prev_liveness = self.recorded_liveness_before_graph
        curr_liveness = self._get_liveness(self.path_weakrefs)
        delta = self._get_different_indices(prev_liveness, curr_liveness)
        self.expected_dead_indices_after_graph = delta
        assert len(self.outputs_weakrefs) == 0
        # index from data pointer to index in outputs
        output_new_storages_index: dict[StorageDataPtr, int] = {}
        self.unaliased_in_all_paths = [False for _ in range(len(outputs))]
        self.static_output_tensors = [None for _ in range(len(outputs))]
        # classify each output: non-tensor, static/empty, alias of a prior
        # graph output, alias of an earlier output here, or a fresh storage
        for i, o in enumerate(outputs):
            if o is None or not isinstance(o, torch.Tensor):
                self.output_storage_alias.append(UnaliasedStorage)
                continue
            torch._check(
                o.is_cuda or o.untyped_storage().data_ptr() == 0,
                lambda: (
                    "Expected all cuda outputs in cuda graph recording. Non cuda output "
                    f"from {self.stack_traces[i] if self.stack_traces else '(unknown)'}"
                ),
            )
            ref = static_input_persistent_storage_ptrs.get(
                o.untyped_storage().data_ptr(), None
            )
            # also treat empty storages as static outputs because we do not need to manage their lifetime
            # and they should not participate in checkpointing
            is_empty_storage = o.untyped_storage().data_ptr() == 0
            if (ref and ref() is not None) or is_empty_storage:
                self.output_storage_alias.append(None)
                self.static_output_tensors[i] = o
                continue
            path_ref = self._is_alias_of_live_recorded_tensor(o)
            if path_ref is not None:
                self._mark_prior_graph_output_as_aliased(path_ref)
                for idx, inp_path_ref in enumerate(
                    self.live_cudagraph_managed_path_refs
                ):
                    if path_ref == inp_path_ref:
                        self.preserved_aliased_inputs[idx] = True
                self.output_storage_alias.append(AliasesPriorGraphOutput(path_ref))
                continue
            if o.untyped_storage().data_ptr() in output_new_storages_index:
                index = output_new_storages_index[o.untyped_storage().data_ptr()]
                self.unaliased_in_all_paths[index] = False
                self.output_storage_alias.append(AliasesNewOutput(index))
                continue
            output_new_storages_index[o.untyped_storage().data_ptr()] = i
            self.output_storage_alias.append(UnaliasedStorage)
            self.unaliased_in_all_paths[i] = True
        if self.stack_traces is None:
            self.stack_traces = [None for _ in range(len(outputs))]
        else:
            assert len(self.stack_traces) == len(outputs), (
                "Wrong number of stack traces passed in"
            )
        assert not self.outputs_weakrefs
        for out, static_output_tensor in zip(outputs, self.static_output_tensors):
            if not isinstance(out, torch.Tensor) or static_output_tensor is not None:
                self.outputs_weakrefs.append(None)
                self.tensor_weakrefs.append(None)
            else:
                self.outputs_weakrefs.append(StorageWeakRefWrapper(out))
                self.tensor_weakrefs.append(TensorWeakRef(out))
        self.recorded_liveness_after_graph = self._get_liveness(self.path_weakrefs)
        # snapshot allocator state so a later child recording can resume the pool
        self.checkpointed_caching_state = torch._C._cuda_getCheckpointState(
            self.device, self.cuda_graphs_pool
        )
        # now, get liveness with outputs added
        for depth in range(len(self.path_weakrefs)):
            for output_index in range(len(self.path_weakrefs[depth])):
                if is_live(self.path_weakrefs[depth][output_index]):
                    self.live_indices_after_graph.append((depth, output_index))
        self.debug_check_invariants_after_invocation()
        if config.triton.slow_path_cudagraph_asserts:
            check_memory_pool(
                self.device, self.cuda_graphs_pool, list(self.path_live_weakrefs())
            )
    def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex) -> None:
        "Remove a graph output from the unaliased, cached tensors in an ancestor node"
        depth, output_index = index
        node = list(self._path_from_root)[depth]
        node.unaliased_in_all_paths[output_index] = False
        x = self.path_weakrefs[depth][output_index]
        assert x is not None
        # drop the extra cached-tensor reference so liveness tracking for the
        # now-aliased storage behaves normally
        x.remove_extra_reference()
    def _initialize_cached_tensors(self) -> None:
        """Materialize and cache output tensors for outputs that are unaliased
        in all paths, wiring refcount-aware weakrefs so liveness tracking can
        tell when the only remaining references are our own.
        """
        # we should not be clearing output_weakrefs, and they should be set in the first
        # record run
        assert len(self.outputs_weakrefs) == len(self.outputs_metadata)
        for i, (storage_info, metadata, make_cached) in enumerate(
            zip(
                self.output_storage_alias,
                self.outputs_metadata,
                self.unaliased_in_all_paths,
            )
        ):
            if not make_cached:
                self.cached_tensor_outputs.append(None)
                continue
            assert storage_info is UnaliasedStorage
            assert isinstance(metadata, dict)
            s = self.create_storage(metadata)
            out = self._reconstruct_from_tensor_metadata(metadata, storage=s)  # type: ignore[arg-type]
            # XXX: let autograd know that there will be an additional reference to the tensor
            # that can be ignored when deciding whether to do gradient buffer inplacing.
            # Otherwise, inplacing could differ between tracing and subsequent execution.
            # For some models we tested this led to inputs no longer being in cudagraph pools,
            # leading to spurious re-recordings.
            # It also tells AMP cache that even though the tensor impls cannot be cached
            # in dtype conversions.
            torch._C._add_cached_tensor(out)
            self_ref = weakref.ref(self)
            # one reference in our array, and calling sys.getrefcount bumps the refcount by one
            def check_refcount(i: int) -> bool:
                # True when only our bookkeeping references remain, i.e. no
                # external Python reference is keeping the output alive
                self_loc = self_ref()
                if self_loc is None:
                    return False
                refcount = self_loc.get_output_refcount(i)
                # pyrefly: ignore
                if self_loc.cached_tensor_outputs[i]._use_count() > 1:
                    # c10::Tensor may also holds one reference count
                    assert refcount >= 3
                    return refcount == 3
                else:
                    assert refcount >= 2
                    return refcount == 2
            check = functools.partial(check_refcount, i=i)
            self.outputs_weakrefs[i] = StorageWeakRefWrapper(out, extra_ref_check=check)
            self.cached_tensor_outputs.append(out)
def get_output_refcount(self, index: int) -> int:
return sys.getrefcount(self.cached_tensor_outputs[index])
@property
def parent(self) -> Optional[CUDAGraphNode]:
"unwraps the weakref to _parent"
return self._parent() if self._parent is not None else None
@property
def _path_to_root(self) -> Generator[CUDAGraphNode, None, None]:
"Returns all nodes in the path starting at self and ending at root"
node = self
while node:
yield node
node = node.parent # type: ignore[assignment]
@property
def _path_from_root(self) -> Generator[CUDAGraphNode, None, None]:
"Returns all nodes in the path starting at the root and ending at self"
nodes = reversed(list(self._path_to_root))
yield from nodes
def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor) -> bool:
"Is this tensor an output of a node in this path"
for output_refs in self.path_weakrefs:
for storage_weak_ref in output_refs:
if storage_weak_ref is None:
continue
# don't need to check liveness of storage since the cuda graph managed
# memory is never released.
data_ptr = storage_weak_ref.data_ptr()
if t.untyped_storage().data_ptr() == data_ptr:
return True
return False
def _is_alias_of_live_recorded_tensor(
self, t: torch.Tensor
) -> Optional[PathOutputIndex]:
for depth, output_refs in enumerate(self.path_weakrefs):
for output_index, storage_ref in enumerate(output_refs):
if (storage_and_ptr := maybe_deref(storage_ref)) is not None:
_storage, ptr = storage_and_ptr
if ptr == t.untyped_storage().data_ptr():
return (depth, output_index)
return None
@staticmethod
def _check_liveness(
indices: list[PathOutputIndex],
output_refs: list[list[Optional[StorageWeakRefWrapper]]],
) -> bool:
"Check that all of the indices specified are dead references"
for depth, output_index in indices:
w = output_refs[depth][output_index]
assert w is not None
if w() is not None:
return False
return True
def add_child(self, function_id: FunctionID, node: CUDAGraphNode) -> None:
"Adds node as a a child of self"
self.children[function_id].append(node)
@staticmethod
def _get_different_indices(
prev: list[list[bool]], curr: list[list[bool]]
) -> list[PathOutputIndex]:
"Find indices where the two lists differ."
dead_indices = []
assert len(prev) <= len(curr)
for i, (outputs1, outputs2) in enumerate(zip(prev, curr)):
assert len(outputs1) == len(outputs2)
for j, (output1, output2) in enumerate(zip(outputs1, outputs2)):
if output1 != output2:
dead_indices.append((i, j))
return dead_indices
@staticmethod
def _get_liveness(
weakrefs: list[list[Optional[StorageWeakRefWrapper]]],
) -> list[list[bool]]:
"Maps weakrefs to true if the reference is alive and false otherwise"
if len(weakrefs) == 0:
return []
return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]
def debug_assert_invariants(
self, expected_liveness: list[list[bool]], newly_dead: list[PathOutputIndex]
) -> None:
if not config.triton.fast_path_cudagraph_asserts:
return
for i, node in enumerate(self._path_from_root):
assert self.path_weakrefs[i] is node.outputs_weakrefs
nodes = list(self._path_from_root)
live_blocks = get_block_addrs(self.cuda_graphs_pool)
live_storage_data_ptrs = OrderedSet[Any]()
live_storage_weak_ptrs = OrderedSet[Any]()
for depth, outputs_liveness in enumerate(expected_liveness):
for output_idx, output_liveness in enumerate(outputs_liveness):
# tensor can die early, but it can't be alive when it should be dead
w = self.path_weakrefs[depth][output_idx]
if (stor_weak_ptr_and_data_ptr := maybe_deref(w)) is not None:
assert output_liveness
stor_weak_ptr, stor_data_ptr = stor_weak_ptr_and_data_ptr
assert (stor_data_ptr in live_storage_data_ptrs) == (
stor_weak_ptr in live_storage_weak_ptrs
)
live_storage_data_ptrs.add(stor_data_ptr)
live_storage_weak_ptrs.add(stor_weak_ptr)
is_persistent_alias = (
nodes[depth].static_output_tensors[output_idx] is not None
)
if is_persistent_alias:
assert stor_data_ptr not in live_blocks
for depth, output_index in newly_dead:
assert not is_live(self.path_weakrefs[depth][output_index])
def debug_check_invariants_before_invocation(self) -> None:
self.debug_assert_invariants(
self.recorded_liveness_before_graph, self.expected_dead_indices_before_graph
)
def debug_check_invariants_after_invocation(self) -> None:
self.debug_assert_invariants(
self.recorded_liveness_before_graph, self.expected_dead_indices_after_graph
)
def data_ptrs_dead_since_invocation(self) -> list[int]:
"""
Since this node was invoked, return data ptrs of all tensor outputs that have died
in the current executing tree path.
"""
curr_liveness = self._get_liveness(self.path_weakrefs)
_get_different_indices = self._get_different_indices(
self.recorded_liveness_after_graph, curr_liveness
)
path = list(self._path_from_root)
ptrs_to_deallocate = []
for depth, output_index in _get_different_indices:
ptrs_to_deallocate.append(
path[depth].outputs_metadata[output_index]["data_ptr"] # type: ignore[index]
)
return ptrs_to_deallocate
def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
for i, j in self.live_indices_after_graph:
out = self.path_weakrefs[i][j]
if out is not None and is_live(out):
yield out
def remove_node_cached_tensors(self) -> None:
for t in self.cached_tensor_outputs:
if t is not None:
torch._C._remove_cached_tensor(t)
self.cached_tensor_outputs.clear()
for i, unaliased in enumerate(self.unaliased_in_all_paths):
if unaliased:
n = self.outputs_weakrefs[i]
assert n is not None
n.remove_extra_reference()
def remove_path_cached_tensors(self) -> None:
for node in self._path_from_root:
node.remove_node_cached_tensors()
def clear_path_state(self) -> None:
"Clear the path state in this current executing node"
# this doesn't actually do anything right now, leaving it as placeholder
@staticmethod
def _tensor_metadata(
x: torch.Tensor, ignore_storage_offset: bool = True
) -> dict[str, Any]:
assert isinstance(x, torch.Tensor)
# We ignore the storage offset for inputs, but not for outputs
# TODO: - should we make the storage resizable ?
return {
"nbytes": x.untyped_storage().nbytes(),
"data_ptr": x.untyped_storage().data_ptr(),
"size": x.shape,
"stride": x.stride(),
"dtype": x.dtype,
"device": x.device,
"storage_offset": x.storage_offset() if not ignore_storage_offset else 0,
}
def _reconstruct_from_tensor_metadata(
self, metadata: dict[str, Any], storage: Optional[UntypedStorage] = None
) -> Tensor:
s = self.create_storage(metadata) if storage is None else storage
return torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata(metadata, s) # type: ignore[arg-type]
def create_storage(self, metadata: dict[str, Any]) -> torch.types.Storage:
return torch._C._construct_storage_from_data_pointer(
metadata["data_ptr"], metadata["device"], metadata["nbytes"]
)
def _allocate_and_copy_recording_inputs(
self, inputs: list[InputType]
) -> list[InputType]:
"""
Allocate inputs for non static, non cudagraph managed tensors in the memory pool
and copy over the tensor values.
"""
torch.cuda.synchronize()
self.stream.wait_stream(torch.cuda.current_stream())
recording_inputs: list[InputType] = []
with (
warnings.catch_warnings(record=True),
torch.cuda.device(self.device),
_use_cuda_memory_pool_manager(
self.device,
mem_pool=self.cuda_graphs_pool,
stream=self.stream,
),
):
for i, inp in enumerate(inputs):
if not isinstance(inp, torch.Tensor):
assert isinstance(inp, (int, torch.Generator))
# pyrefly: ignore [bad-argument-type]
recording_inputs.append(inp)
elif i not in self.static_input_idxs:
# static_input does an allocation!
recording_inputs.append(static_input(inp))
else:
recording_inputs.append(inp)
self._copy_inputs_and_remove_from_src(recording_inputs, inputs)
return recording_inputs
def check_invariants(
self, inputs: list[InputType]
) -> tuple[CheckInvariantStatus, Callable[..., str]]:
"""
Checks if this node can be run. The same pattern of tensor liveness, static inputs,
and tensors managed in the cudagraph private pool must remain stable.
"""
_logger = functools.partial(
log_data_ptr_mismatch,
self.wrapped_function.placeholders,
inputs,
self.static_input_data_ptrs,
)
# previously managed data pointers remain stable
# this is on the hot path so moved to C++. equivalent to:
# return all(t.data_ptr() == data_ptr for (t, data_ptr) in zip(tensors, data_ptrs))
if not torch._C._tensors_data_ptrs_at_indices_equal(
inputs, # type: ignore[arg-type]
self.static_input_data_ptrs,
self.cudagraph_managed_idxs,
):
status = CheckInvariantStatus.CudagraphManagedIdxMismatch
_logger = functools.partial(
_logger,
self.cudagraph_managed_idxs,
status,
)
return status, _logger
if not self._check_liveness(
self.expected_dead_indices_before_graph, self.path_weakrefs
):
status = CheckInvariantStatus.ExpectedDeadIndicesBeforeGraphMismatch
return status, lambda: f"{status}"
# static input data pointers should remain stable
# if we are inlining builtin nn modules we re-record in this case
# if we are not inlining builtin nn modules, we check this in check_static_inputs_are_stable
# and error if they are not stable
if (
self.rerecord_if_static_inputs_change
and not torch._C._tensors_data_ptrs_at_indices_equal(
inputs, # type: ignore[arg-type]
self.static_input_data_ptrs,
self.static_input_idxs,
)
):
status = CheckInvariantStatus.StaticInputIdxMismatch
_logger = functools.partial(
_logger,
self.static_input_idxs,
status,
)
return status, _logger
# the cudagraph managed tensors which died upon recording must also die upon
# this invocation. it is too late to check after we've replayed the graph,
# because we would have already written over their memory.
for idx in self.cudagraph_managed_idxs:
if not self.preserved_aliased_inputs[idx]:
inputs[idx] = None # type: ignore[call-overload]
torch._check(
self._check_liveness(
self.expected_dead_indices_after_graph, self.path_weakrefs
),
lambda: "TODO: graph recording observed an input tensor deallocate during graph "
" recording that did not occur during replay. Please file an issue.",
)
return CheckInvariantStatus.SUCCESS, lambda: f"{CheckInvariantStatus.SUCCESS}"
def num_descendants(self) -> int:
"Total number of descendents of this node"
num_desc = 0
for children in self.children.values():
for child in children:
num_desc += 1
num_desc += child.num_descendants()
return num_desc
def get_cudagraph_segments(pool_id: tuple[int, int]) -> Any:
segments = torch.cuda.memory_snapshot()
return [segment for segment in segments if segment["segment_pool_id"] == pool_id]
def get_block_addrs(pool_id: tuple[int, int], live_only: bool = True) -> list[int]:
blocks = []
for segment in get_cudagraph_segments(pool_id):
addr = segment["address"]
for block in segment["blocks"]:
if block["state"] == "active_allocated" or not live_only:
blocks.append(addr)
addr += block["size"]
return blocks
def format_tb(frames: list[Any]) -> str:
formatted_traceback = [
traceback.FrameSummary(entry["filename"], entry["line"], entry["name"])
for entry in frames
]
return "".join(traceback.format_list(formatted_traceback))
def check_memory_pool(
device: int,
pool_id: tuple[int, int],
live_storages_ptrs: list[StorageWeakRefWrapper],
) -> None:
assert all(isinstance(elem, StorageWeakRefWrapper) for elem in live_storages_ptrs) # noqa: C419
unique_storages = {stor.data_ptr() for stor in live_storages_ptrs if stor()} # noqa: set_linter
# check if there is a divergence first, then do the expensive snapshot call after
# we know it will error
if torch._C._cuda_checkPoolLiveAllocations(device, pool_id, unique_storages):
return
# at this point we are past the fast-path. we have seen rare cases where a dead tensor is dead,
# but hasn't been gc'd yet, and gives false positive for allocated_not_in_live_storages
gc.collect()
torch.cuda.synchronize()
segments = get_cudagraph_segments(pool_id)
allocated_not_in_live_storages = {}
for segment in segments:
addr = segment["address"]
for block in segment["blocks"]:
if block["state"] == "active_allocated":
if addr not in unique_storages:
allocated_not_in_live_storages[addr] = block
else:
unique_storages.remove(addr)
addr += block["size"]
torch._check(
len(unique_storages) == 0,
lambda: f"These storage data ptrs are not allocated in pool {pool_id} but should be {unique_storages}",
)
if len(allocated_not_in_live_storages) != 0:
formatted = []
for dp, block in allocated_not_in_live_storages.items():
trace = format_tb(block.get("frames", []))
# pyrefly: ignore [bad-argument-type]
formatted.append(f"Data Pointer: {dp}, history: \n{trace}")
formatted_s = "\n".join(formatted)
msg = (
f"These live storage data ptrs are in the cudagraph pool but not "
f"accounted for as an output of cudagraph trees: \n\n{formatted_s}"
)
raise RuntimeError(msg)
| CUDAGraphNode |
python | kubernetes-client__python | kubernetes/base/stream/ws_client.py | {
"start": 1161,
"end": 1367
} | class ____:
def write(self, _x):
pass
def getvalue(self):
raise TypeError("Tried to read_all() from a WSClient configured to not capture. Did you mean `capture_all=True`?")
| _IgnoredIO |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph_dfs/test_dfs.py | {
"start": 18,
"end": 820
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDfs, self).__init__()
self.results = Results()
def test_dfs(self):
nodes = []
graph = GraphDfs()
for id in range(0, 6):
nodes.append(graph.add_node(id))
graph.add_edge(0, 1, 5)
graph.add_edge(0, 4, 3)
graph.add_edge(0, 5, 2)
graph.add_edge(1, 3, 5)
graph.add_edge(1, 4, 4)
graph.add_edge(2, 1, 6)
graph.add_edge(3, 2, 7)
graph.add_edge(3, 4, 8)
graph.dfs(nodes[0], self.results.add_result)
self.assertEqual(str(self.results), "[0, 1, 3, 2, 4, 5]")
print('Success: test_dfs')
def main():
test = TestDfs()
test.test_dfs()
if __name__ == '__main__':
main()
| TestDfs |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 5682,
"end": 5914
} | class ____(graphene.Enum):
SET_OBJECT = "SET_OBJECT"
GET_OBJECT = "GET_OBJECT"
RM_OBJECT = "RM_OBJECT"
CP_OBJECT = "CP_OBJECT"
class Meta:
name = "ObjectStoreOperationType"
| GrapheneObjectStoreOperationType |
python | ipython__ipython | tests/test_path.py | {
"start": 10572,
"end": 13520
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.filenames_start_with_a = ["a0", "a1", "a2"]
cls.filenames_end_with_b = ["0b", "1b", "2b"]
cls.filenames = cls.filenames_start_with_a + cls.filenames_end_with_b
cls.tempdir = TemporaryDirectory()
td = cls.tempdir.name
with cls.in_tempdir():
# Create empty files
for fname in cls.filenames:
open(os.path.join(td, fname), "w", encoding="utf-8").close()
@classmethod
def tearDownClass(cls):
cls.tempdir.cleanup()
@classmethod
@contextmanager
def in_tempdir(cls):
save = os.getcwd()
try:
os.chdir(cls.tempdir.name)
yield
finally:
os.chdir(save)
def check_match(self, patterns, matches):
with self.in_tempdir():
# glob returns unordered list. that's why sorted is required.
assert sorted(path.shellglob(patterns)) == sorted(matches)
def common_cases(self):
return [
(["*"], self.filenames),
(["a*"], self.filenames_start_with_a),
(["*c"], ["*c"]),
(
["*", "a*", "*b", "*c"],
self.filenames
+ self.filenames_start_with_a
+ self.filenames_end_with_b
+ ["*c"],
),
(["a[012]"], self.filenames_start_with_a),
]
@skip_win32
def test_match_posix(self):
for patterns, matches in self.common_cases() + [
([r"\*"], ["*"]),
([r"a\*", "a*"], ["a*"] + self.filenames_start_with_a),
([r"a\[012]"], ["a[012]"]),
]:
self.check_match(patterns, matches)
@skip_if_not_win32
def test_match_windows(self):
for patterns, matches in self.common_cases() + [
# In windows, backslash is interpreted as path
# separator. Therefore, you can't escape glob
# using it.
([r"a\*", "a*"], [r"a\*"] + self.filenames_start_with_a),
([r"a\[012]"], [r"a\[012]"]),
]:
self.check_match(patterns, matches)
@pytest.mark.parametrize(
"globstr, unescaped_globstr",
[
(r"\*\[\!\]\?", "*[!]?"),
(r"\\*", r"\*"),
(r"\\\*", r"\*"),
(r"\\a", r"\a"),
(r"\a", r"\a"),
],
)
def test_unescape_glob(globstr, unescaped_globstr):
assert path.unescape_glob(globstr) == unescaped_globstr
@onlyif_unicode_paths
def test_ensure_dir_exists():
with TemporaryDirectory() as td:
d = os.path.join(td, "∂ir")
path.ensure_dir_exists(d) # create it
assert os.path.isdir(d)
path.ensure_dir_exists(d) # no-op
f = os.path.join(td, "ƒile")
open(f, "w", encoding="utf-8").close() # touch
with pytest.raises(IOError):
path.ensure_dir_exists(f)
| TestShellGlob |
python | getsentry__sentry | src/sentry/middleware/ratelimit.py | {
"start": 1003,
"end": 7072
} | class ____:
"""Middleware that applies a rate limit to every endpoint.
See: https://docs.djangoproject.com/en/4.0/topics/http/middleware/#writing-your-own-middleware
"""
def __init__(self, get_response: Callable[[HttpRequest], HttpResponseBase]):
self.get_response = get_response
def __call__(self, request: HttpRequest) -> HttpResponseBase:
# process_view is automatically called by Django
with sentry_sdk.start_span(op="ratelimit.__call__"):
response = self.get_response(request)
self.process_response(request, response)
return response
def process_view(
self, request: HttpRequest, view_func, view_args, view_kwargs
) -> HttpResponseBase | None:
"""Check if the endpoint call will violate."""
with metrics.timer("middleware.ratelimit.process_view", sample_rate=0.01):
try:
# TODO: put these fields into their own object
request.will_be_rate_limited = False
if settings.SENTRY_SELF_HOSTED:
return None
request.rate_limit_category = None
request.rate_limit_uid = uuid.uuid4().hex
view_class = getattr(view_func, "view_class", None)
if not view_class:
return None
enforce_rate_limit = getattr(view_class, "enforce_rate_limit", False)
if enforce_rate_limit is False:
return None
rate_limit_config = get_rate_limit_config(
view_class, view_args, {**view_kwargs, "request": request}
)
rate_limit_group = (
rate_limit_config.group if rate_limit_config else RateLimitConfig().group
)
request.rate_limit_key = get_rate_limit_key(
view_func, request, rate_limit_group, rate_limit_config
)
if request.rate_limit_key is None:
return None
category_str = request.rate_limit_key.split(":", 1)[0]
request.rate_limit_category = category_str
rate_limit = get_rate_limit_value(
http_method=request.method,
category=RateLimitCategory(category_str),
rate_limit_config=rate_limit_config,
)
if rate_limit is None:
return None
request.rate_limit_metadata = above_rate_limit_check(
request.rate_limit_key, rate_limit, request.rate_limit_uid, rate_limit_group
)
# TODO: also limit by concurrent window once we have the data
rate_limit_cond = (
request.rate_limit_metadata.rate_limit_type != RateLimitType.NOT_LIMITED
if settings.ENFORCE_CONCURRENT_RATE_LIMITS
else request.rate_limit_metadata.rate_limit_type == RateLimitType.FIXED_WINDOW
)
if rate_limit_cond:
request.will_be_rate_limited = True
logger.info(
"sentry.api.rate-limit.exceeded",
extra={
"key": request.rate_limit_key,
"url": request.build_absolute_uri(),
"limit": request.rate_limit_metadata.limit,
"window": request.rate_limit_metadata.window,
},
)
if request.rate_limit_metadata.rate_limit_type == RateLimitType.FIXED_WINDOW:
response_text = DEFAULT_ERROR_MESSAGE.format(
limit=request.rate_limit_metadata.limit,
window=request.rate_limit_metadata.window,
)
else:
response_text = DEFAULT_CONCURRENT_ERROR_MESSAGE.format(
limit=request.rate_limit_metadata.concurrent_limit
)
response_json = {
"detail": response_text,
}
response = HttpResponse(orjson.dumps(response_json), status=429)
assert request.method is not None
return apply_cors_headers(
request=request, response=response, allowed_methods=[request.method]
)
except Exception:
logging.exception(
"Error during rate limiting, failing open. THIS SHOULD NOT HAPPEN"
)
return None
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
with metrics.timer("middleware.ratelimit.process_response", sample_rate=0.01):
try:
rate_limit_metadata: RateLimitMeta | None = getattr(
request, "rate_limit_metadata", None
)
if rate_limit_metadata:
response["X-Sentry-Rate-Limit-Remaining"] = rate_limit_metadata.remaining
response["X-Sentry-Rate-Limit-Limit"] = rate_limit_metadata.limit
response["X-Sentry-Rate-Limit-Reset"] = rate_limit_metadata.reset_time
response["X-Sentry-Rate-Limit-ConcurrentRemaining"] = (
rate_limit_metadata.concurrent_remaining
)
response["X-Sentry-Rate-Limit-ConcurrentLimit"] = (
rate_limit_metadata.concurrent_limit
)
if hasattr(request, "rate_limit_key") and hasattr(request, "rate_limit_uid"):
finish_request(request.rate_limit_key, request.rate_limit_uid)
except Exception:
logging.exception("COULD NOT POPULATE RATE LIMIT HEADERS")
return response
| RatelimitMiddleware |
python | plotly__plotly.py | plotly/graph_objs/histogram2dcontour/_contours.py | {
"start": 233,
"end": 14602
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour"
_path_str = "histogram2dcontour.contours"
_valid_props = {
"coloring",
"end",
"labelfont",
"labelformat",
"operation",
"showlabels",
"showlines",
"size",
"start",
"type",
"value",
}
@property
def coloring(self):
"""
Determines the coloring method showing the contour values. If
"fill", coloring is done evenly between each contour level If
"heatmap", a heatmap gradient coloring is applied between each
contour level. If "lines", coloring is done on the contour
lines. If "none", no coloring is applied on this trace.
The 'coloring' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fill', 'heatmap', 'lines', 'none']
Returns
-------
Any
"""
return self["coloring"]
@coloring.setter
def coloring(self, val):
self["coloring"] = val
@property
def end(self):
"""
Sets the end contour level value. Must be more than
`contours.start`
The 'end' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
@property
def labelfont(self):
"""
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
The 'labelfont' property is an instance of Labelfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.contours.Labelfont`
- A dict of string/value properties that will be passed
to the Labelfont constructor
Returns
-------
plotly.graph_objs.histogram2dcontour.contours.Labelfont
"""
return self["labelfont"]
@labelfont.setter
def labelfont(self, val):
self["labelfont"] = val
@property
def labelformat(self):
"""
Sets the contour label formatting rule using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
The 'labelformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["labelformat"]
@labelformat.setter
def labelformat(self, val):
self["labelformat"] = val
@property
def operation(self):
"""
Sets the constraint operation. "=" keeps regions equal to
`value` "<" and "<=" keep regions less than `value` ">" and
">=" keep regions greater than `value` "[]", "()", "[)", and
"(]" keep regions inside `value[0]` to `value[1]` "][", ")(",
"](", ")[" keep regions outside `value[0]` to value[1]` Open
vs. closed intervals make no difference to constraint display,
but all versions are allowed for consistency with filter
transforms.
The 'operation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['=', '<', '>=', '>', '<=', '[]', '()', '[)', '(]', '][',
')(', '](', ')[']
Returns
-------
Any
"""
return self["operation"]
@operation.setter
def operation(self, val):
self["operation"] = val
@property
def showlabels(self):
"""
Determines whether to label the contour lines with their
values.
The 'showlabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlabels"]
@showlabels.setter
def showlabels(self, val):
self["showlabels"] = val
@property
def showlines(self):
"""
Determines whether or not the contour lines are drawn. Has an
effect only if `contours.coloring` is set to "fill".
The 'showlines' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlines"]
@showlines.setter
def showlines(self, val):
self["showlines"] = val
@property
def size(self):
"""
Sets the step between each contour level. Must be positive.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def start(self):
"""
Sets the starting contour level value. Must be less than
`contours.end`
The 'start' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
@property
def type(self):
"""
If `levels`, the data is represented as a contour plot with
multiple levels displayed. If `constraint`, the data is
represented as constraints with the invalid region shaded as
specified by the `operation` and `value` parameters.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['levels', 'constraint']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def value(self):
"""
Sets the value or values of the constraint boundary. When
`operation` is set to one of the comparison values
(`=,<,>=,>,<=`) "value" is expected to be a number. When
`operation` is set to one of the interval values
(`[],(),[),(],][,)(,](,)[`) "value" is expected to be an array
of two numbers where the first is the lower bound and the
second is the upper bound.
The 'value' property accepts values of any type
Returns
-------
Any
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
coloring
Determines the coloring method showing the contour
values. If "fill", coloring is done evenly between each
contour level If "heatmap", a heatmap gradient coloring
is applied between each contour level. If "lines",
coloring is done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more than
`contours.start`
labelfont
Sets the font used for labeling the contour levels. The
default color comes from the lines, if shown. The
default family and size come from `layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
operation
Sets the constraint operation. "=" keeps regions equal
to `value` "<" and "<=" keep regions less than `value`
">" and ">=" keep regions greater than `value` "[]",
"()", "[)", and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions outside
`value[0]` to value[1]` Open vs. closed intervals make
no difference to constraint display, but all versions
are allowed for consistency with filter transforms.
showlabels
Determines whether to label the contour lines with
their values.
showlines
Determines whether or not the contour lines are drawn.
Has an effect only if `contours.coloring` is set to
"fill".
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
type
If `levels`, the data is represented as a contour plot
with multiple levels displayed. If `constraint`, the
data is represented as constraints with the invalid
region shaded as specified by the `operation` and
`value` parameters.
value
Sets the value or values of the constraint boundary.
When `operation` is set to one of the comparison values
(`=,<,>=,>,<=`) "value" is expected to be a number.
When `operation` is set to one of the interval values
(`[],(),[),(],][,)(,](,)[`) "value" is expected to be
an array of two numbers where the first is the lower
bound and the second is the upper bound.
"""
def __init__(
    self,
    arg=None,
    coloring=None,
    end=None,
    labelfont=None,
    labelformat=None,
    operation=None,
    showlabels=None,
    showlines=None,
    size=None,
    start=None,
    type=None,
    value=None,
    **kwargs,
):
    """
    Construct a new Contours object.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.histogram2dcontour.Contours`
    coloring
        Coloring method for the contour values: "fill" (even
        coloring between levels), "heatmap" (gradient between
        levels), "lines" (color the contour lines), or "none".
    end
        End contour level value; must be more than
        `contours.start`.
    labelfont
        Font used for labeling the contour levels. Default color
        comes from the lines (if shown); default family and size
        come from `layout.font`.
    labelformat
        Contour label formatting rule using d3 formatting
        mini-languages. For numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
    operation
        Constraint operation. "=" keeps regions equal to `value`;
        "<", "<=", ">", ">=" keep regions on one side of `value`;
        "[]", "()", "[)", "(]" keep regions inside `value[0]` to
        `value[1]`; "][", ")(", "](", ")[" keep regions outside
        that interval. Open vs. closed intervals make no
        difference to constraint display.
    showlabels
        Whether to label the contour lines with their values.
    showlines
        Whether the contour lines are drawn. Only has an effect
        when `contours.coloring` is "fill".
    size
        Step between contour levels; must be positive.
    start
        Starting contour level value; must be less than
        `contours.end`.
    type
        "levels" renders a multi-level contour plot; "constraint"
        shades the invalid region per `operation` and `value`.
    value
        Value(s) of the constraint boundary: a single number for
        comparison operations, or a two-number array
        [lower, upper] for interval operations.

    Returns
    -------
    Contours
    """
    super().__init__("contours")

    # Child-of-existing-figure fast path: record the parent and
    # skip all other initialization.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict of property values.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.Contours
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Contours`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply each explicitly named property, letting `arg` supply a
    # fallback when the keyword was not given.
    for prop_name, prop_value in (
        ("coloring", coloring),
        ("end", end),
        ("labelfont", labelfont),
        ("labelformat", labelformat),
        ("operation", operation),
        ("showlabels", showlabels),
        ("showlines", showlines),
        ("size", size),
        ("start", start),
        ("type", type),
        ("value", value),
    ):
        self._set_property(prop_name, arg, prop_value)

    # Forward everything else (remaining `arg` entries merged with
    # any extra keyword arguments).
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
| Contours |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.